path: root/drivers
author     Thomas Gleixner <tglx@linutronix.de>   2014-07-16 21:57:38 +0200
committer  Thomas Gleixner <tglx@linutronix.de>   2014-07-16 21:57:38 +0200
commit     afdb094380889222583df9ef803587f6b8a82c8d (patch)
tree       4a03c516568e5c8b994a5739f3d34f4552c78898 /drivers
parent     be11e6d86081aa6328eaa4fe6dd14ccf39a023c8 (diff)
parent     1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
Merge tag 'v3.16-rc5' into timers/core
Reason: Bring in upstream modifications, so the pending changes which depend on them can be queued.
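The workflow this merge records is the usual topic-branch pattern: check out the topic branch, then merge the upstream tag so that dependent work can be queued on top of it. A minimal sketch in git, assuming a clone of the tip tree; the branch name, tag name, and commit hashes are all taken from the commit header and message above, and the diffstat is assumed (as is usual for a merge on this kind of page) to be taken against the first parent:

    # Bring the upstream release candidate into the topic branch.
    git checkout timers/core
    git merge v3.16-rc5

    # Reproduce (approximately) the diffstat shown below: diff the
    # merge result against its first parent, limited to drivers/.
    git diff --stat \
        be11e6d86081aa6328eaa4fe6dd14ccf39a023c8 \
        afdb094380889222583df9ef803587f6b8a82c8d \
        -- drivers/

Diffing against the first parent means the stat reflects only the changes brought in from the v3.16-rc5 side of the merge, which is exactly what "bring in upstream modifications" implies.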
Diffstat (limited to 'drivers'). Each row lists the file mode, the file path (with the old path in parentheses for renames), and the number of lines changed:
-rw-r--r--drivers/acpi/ac.c130
-rw-r--r--drivers/acpi/acpi_lpss.c15
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/battery.c80
-rw-r--r--drivers/acpi/ec.c164
-rw-r--r--drivers/acpi/osl.c15
-rw-r--r--drivers/acpi/resource.c10
-rw-r--r--drivers/acpi/sleep.c3
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/acpi/video.c13
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/acard-ahci.c6
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/ahci.h3
-rw-r--r--drivers/ata/ahci_da850.c3
-rw-r--r--drivers/ata/ahci_imx.c41
-rw-r--r--drivers/ata/ahci_mvebu.c128
-rw-r--r--drivers/ata/ahci_platform.c9
-rw-r--r--drivers/ata/ahci_st.c2
-rw-r--r--drivers/ata/ahci_sunxi.c9
-rw-r--r--drivers/ata/ahci_xgene.c67
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libahci.c12
-rw-r--r--drivers/ata/libahci_platform.c12
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/libata-sff.c9
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c4
-rw-r--r--drivers/ata/pata_amd.c4
-rw-r--r--drivers/ata/pata_artop.c4
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_atp867x.c4
-rw-r--r--drivers/ata/pata_bf54x.c2
-rw-r--r--drivers/ata/pata_cmd640.c4
-rw-r--r--drivers/ata/pata_cmd64x.c4
-rw-r--r--drivers/ata/pata_cs5520.c6
-rw-r--r--drivers/ata/pata_cs5530.c6
-rw-r--r--drivers/ata/pata_cs5535.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c2
-rw-r--r--drivers/ata/pata_ep93xx.c4
-rw-r--r--drivers/ata/pata_hpt366.c4
-rw-r--r--drivers/ata/pata_hpt3x3.c4
-rw-r--r--drivers/ata/pata_imx.c4
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c4
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_macio.c22
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c8
-rw-r--r--drivers/ata/pata_mpiix.c2
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c5
-rw-r--r--drivers/ata/pata_ns87410.c2
-rw-r--r--drivers/ata/pata_ns87415.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c50
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_opti.c2
-rw-r--r--drivers/ata/pata_optidma.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_radisys.c2
-rw-r--r--drivers/ata/pata_rdc.c2
-rw-r--r--drivers/ata/pata_rz1000.c4
-rw-r--r--drivers/ata/pata_samsung_cf.c4
-rw-r--r--drivers/ata/pata_sc1200.c2
-rw-r--r--drivers/ata/pata_scc.c2
-rw-r--r--drivers/ata/pata_sch.c2
-rw-r--r--drivers/ata/pata_serverworks.c4
-rw-r--r--drivers/ata/pata_sil680.c4
-rw-r--r--drivers/ata/pata_sis.c4
-rw-r--r--drivers/ata/pata_sl82c105.c4
-rw-r--r--drivers/ata/pata_triflex.c4
-rw-r--r--drivers/ata/pata_via.c4
-rw-r--r--drivers/ata/sata_fsl.c18
-rw-r--r--drivers/ata/sata_inic162x.c4
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/ata/sata_nv.c6
-rw-r--r--drivers/ata/sata_rcar.c4
-rw-r--r--drivers/ata/sata_sil.c6
-rw-r--r--drivers/ata/sata_sil24.c10
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/ata/sata_via.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/idt77252.c8
-rw-r--r--drivers/base/dma-contiguous.c12
-rw-r--r--drivers/base/power/main.c30
-rw-r--r--drivers/base/syscore.c5
-rw-r--r--drivers/block/drbd/drbd_receiver.c5
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c15
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h8
-rw-r--r--drivers/block/null_blk.c7
-rw-r--r--drivers/block/nvme-core.c203
-rw-r--r--drivers/block/nvme-scsi.c36
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c252
-rw-r--r--drivers/block/zram/zram_drv.c5
-rw-r--r--drivers/bluetooth/ath3k.c3
-rw-r--r--drivers/bluetooth/btmrvl_drv.h4
-rw-r--r--drivers/bluetooth/btmrvl_main.c19
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c103
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h3
-rw-r--r--drivers/bluetooth/btusb.c155
-rw-r--r--drivers/bluetooth/hci_h4.c7
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/hw_random/virtio-rng.c105
-rw-r--r--drivers/char/i8k.c4
-rw-r--r--drivers/char/random.c17
-rw-r--r--drivers/char/raw.c8
-rw-r--r--drivers/clk/clk-s2mps11.c7
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c91
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c9
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c6
-rw-r--r--drivers/clk/spear/spear3xx_clock.c16
-rw-r--r--drivers/clk/sunxi/Makefile4
-rw-r--r--drivers/clk/sunxi/clk-a10-hosc.c73
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c119
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c99
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0.c77
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c233
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c239
-rw-r--r--drivers/clk/ti/Makefile4
-rw-r--r--drivers/clk/ti/apll.c187
-rw-r--r--drivers/clk/ti/clk-2xxx.c256
-rw-r--r--drivers/clk/ti/clk-43xx.c16
-rw-r--r--drivers/clk/ti/clk-54xx.c6
-rw-r--r--drivers/clk/ti/clk-7xx.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c312
-rw-r--r--drivers/clk/ti/dpll.c143
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/ti/interface.c11
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clocksource/exynos_mct.c29
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c2
-rw-r--r--drivers/cpufreq/cpufreq.c77
-rw-r--r--drivers/cpufreq/cpufreq_governor.c67
-rw-r--r--drivers/cpufreq/cpufreq_governor.h7
-rw-r--r--drivers/cpufreq/intel_pstate.c46
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c9
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c100
-rw-r--r--drivers/cpuidle/Kconfig5
-rw-r--r--drivers/cpuidle/Kconfig.mips17
-rw-r--r--drivers/cpuidle/Makefile4
-rw-r--r--drivers/cpuidle/cpuidle-armada-370-xp.c4
-rw-r--r--drivers/cpuidle/cpuidle-cps.c186
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c8
-rw-r--r--drivers/cpuidle/driver.c7
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/caam/jr.c8
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/cppi41.c13
-rw-r--r--drivers/dma/dw/core.c49
-rw-r--r--drivers/dma/dw/pci.c12
-rw-r--r--drivers/dma/dw/platform.c18
-rw-r--r--drivers/dma/fsldma.c306
-rw-r--r--drivers/dma/imx-sdma.c24
-rw-r--r--drivers/dma/mmp_pdma.c95
-rw-r--r--drivers/dma/mpc512x_dma.c342
-rw-r--r--drivers/dma/pch_dma.c3
-rw-r--r--drivers/dma/s3c24xx-dma.c113
-rw-r--r--drivers/dma/sh/Kconfig2
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c1
-rw-r--r--drivers/dma/sh/shdma-base.c98
-rw-r--r--drivers/dma/sh/shdmac.c15
-rw-r--r--drivers/dma/sh/sudmac.c7
-rw-r--r--drivers/dma/ste_dma40.c182
-rw-r--r--drivers/dma/xilinx/Makefile1
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c1379
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/fdt.c2
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c4
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/ast/Makefile4
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c410
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h24
-rw-r--r--drivers/gpu/drm/ast/ast_main.c97
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c100
-rw-r--r--drivers/gpu/drm/ast/ast_post.c902
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h67
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c6
-rw-r--r--drivers/gpu/drm/bridge/ptn3460.c7
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c8
-rw-r--r--drivers/gpu/drm/drm_bufs.c34
-rw-r--r--drivers/gpu/drm/drm_cache.c6
-rw-r--r--drivers/gpu/drm/drm_crtc.c421
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c25
-rw-r--r--drivers/gpu/drm/drm_drv.c3
-rw-r--r--drivers/gpu/drm/drm_edid.c293
-rw-r--r--drivers/gpu/drm/drm_edid_load.c23
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c77
-rw-r--r--drivers/gpu/drm/drm_fops.c9
-rw-r--r--drivers/gpu/drm/drm_gem.c19
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c37
-rw-r--r--drivers/gpu/drm/drm_irq.c461
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c9
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c248
-rw-r--r--drivers/gpu/drm/drm_pci.c159
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c154
-rw-r--r--drivers/gpu/drm/drm_platform.c40
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c76
-rw-r--r--drivers/gpu/drm/drm_stub.c61
-rw-r--r--drivers/gpu/drm/drm_sysfs.c6
-rw-r--r--drivers/gpu/drm/drm_usb.c34
-rw-r--r--drivers/gpu/drm/exynos/Kconfig8
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c63
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c211
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h60
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_reg.c46
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c216
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c74
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c446
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h87
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c114
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c427
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c213
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c258
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c108
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c671
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c117
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h16
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h1
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c8
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c18
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig3
-rw-r--r--drivers/gpu/drm/i915/Makefile8
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c24
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c758
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c271
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c122
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c626
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h482
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c559
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c93
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c136
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c221
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h284
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c198
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c711
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c58
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1164
-rw-r--r--drivers/gpu/drm/i915/i915_params.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h783
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c6
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h101
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c337
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h64
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c83
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c97
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1904
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c623
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h102
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c217
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h19
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c10
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h5
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c589
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c40
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c260
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c23
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c13
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c16
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c181
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c833
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate.h48
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen6.c289
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen7.c253
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen8.c479
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c820
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h170
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c49
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c59
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c245
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c221
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c111
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c2
-rw-r--r--drivers/gpu/drm/mga/mga_state.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c60
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c44
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c58
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h17
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c107
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h31
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c23
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h1
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c275
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c337
-rw-r--r--drivers/gpu/drm/nouveau/Makefile15
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c295
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c288
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c206
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c278
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h65
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c29
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc18
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h460
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h460
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve4.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h84
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ibus.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/conn.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c152
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c152
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c212
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h58
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/port.h15
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h85
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c108
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c1
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c3
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c99
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c4
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c208
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c52
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c17
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c39
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c43
-rw-r--r--drivers/gpu/drm/radeon/cikd.h3
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h8
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h4
-rw-r--r--drivers/gpu/drm/radeon/clearstate_si.h4
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c244
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c86
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c48
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h3
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c17
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/r100.c63
-rw-r--r--drivers/gpu/drm/radeon/r300.c7
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c15
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c341
-rw-r--r--drivers/gpu/drm/radeon/r600d.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon.h60
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h40
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c116
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c295
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h22
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c125
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770.c13
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c39
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c24
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c13
-rw-r--r--drivers/gpu/drm/radeon/sid.h1
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c7
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/bus.c75
-rw-r--r--drivers/gpu/drm/tegra/dc.c657
-rw-r--r--drivers/gpu/drm/tegra/dc.h33
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c35
-rw-r--r--drivers/gpu/drm/tegra/drm.c36
-rw-r--r--drivers/gpu/drm/tegra/drm.h58
-rw-r--r--drivers/gpu/drm/tegra/dsi.c250
-rw-r--r--drivers/gpu/drm/tegra/dsi.h10
-rw-r--r--drivers/gpu/drm/tegra/fb.c7
-rw-r--r--drivers/gpu/drm/tegra/gem.c3
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c8
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c8
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c202
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h5
-rw-r--r--drivers/gpu/drm/tegra/rgb.c31
-rw-r--r--drivers/gpu/drm/tegra/sor.c478
-rw-r--r--drivers/gpu/drm/tegra/sor.h4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c1
-rw-r--r--drivers/gpu/drm/via/via_dma.c2
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c10
-rw-r--r--drivers/gpu/host1x/bus.c12
-rw-r--r--drivers/gpu/ipu-v3/Kconfig7
-rw-r--r--drivers/gpu/ipu-v3/Makefile (renamed from drivers/staging/imx-drm/ipu-v3/Makefile)4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-common.c)82
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-dc.c)3
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-di.c)2
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c)2
-rw-r--r--drivers/gpu/ipu-v3/ipu-dp.c (renamed from drivers/staging/imx-drm/ipu-v3/ipu-dp.c)2
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h (renamed from drivers/staging/imx-drm/ipu-v3/ipu-prv.h)8
-rw-r--r--drivers/gpu/ipu-v3/ipu-smfc.c97
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-picolcd_fb.c2
-rw-r--r--drivers/hid/hid-rmi.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c25
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hsi/clients/Kconfig2
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c4
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hv/connection.c8
-rw-r--r--drivers/hv/hv_kvp.c17
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h6
-rw-r--r--drivers/hv/vmbus_drv.c10
-rw-r--r--drivers/hwmon/Kconfig16
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adc128d818.c28
-rw-r--r--drivers/hwmon/adm1021.c14
-rw-r--r--drivers/hwmon/adm1029.c3
-rw-r--r--drivers/hwmon/adm1031.c8
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/atxp1.c42
-rw-r--r--drivers/hwmon/emc2103.c15
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/ina2xx.c7
-rw-r--r--drivers/hwmon/lm85.c33
-rw-r--r--drivers/hwmon/ltc4151.c51
-rw-r--r--drivers/hwmon/ntc_thermistor.c16
-rw-r--r--drivers/hwmon/shtc1.c251
-rw-r--r--drivers/hwmon/vexpress.c82
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/i2c/busses/Kconfig23
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c763
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c344
-rw-r--r--drivers/i2c/muxes/Kconfig1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c7
-rw-r--r--drivers/iio/adc/ad799x.c8
-rw-r--r--drivers/iio/adc/at91_adc.c16
-rw-r--r--drivers/iio/adc/men_z188_adc.c4
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/adc/twl4030-madc.c1
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c3
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c7
-rw-r--r--drivers/iio/inkern.c6
-rw-r--r--drivers/iio/light/hid-sensor-als.c7
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/light/tcs3472.c11
-rw-r--r--drivers/iio/magnetometer/ak8975.c9
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c7
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c7
-rw-r--r--drivers/iio/pressure/mpl3115.c6
-rw-r--r--drivers/infiniband/Makefile19
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/iwpm_msg.c685
-rw-r--r--drivers/infiniband/core/iwpm_util.c607
-rw-r--r--drivers/infiniband/core/iwpm_util.h238
-rw-r--r--drivers/infiniband/core/netlink.c18
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c85
-rw-r--r--drivers/infiniband/core/user_mad.c75
-rw-r--r--drivers/infiniband/core/verbs.c8
-rw-r--r--drivers/infiniband/hw/Makefile12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c324
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c83
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h48
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sdma.c4
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c40
-rw-r--r--drivers/infiniband/hw/mlx4/main.c26
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c102
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c7
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c105
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h13
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c76
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c42
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c14
-rw-r--r--drivers/infiniband/hw/mlx5/user.h2
-rw-r--r--drivers/infiniband/hw/nes/nes.c25
-rw-r--r--drivers/infiniband/hw/nes/nes.h3
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c320
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c18
-rw-r--r--drivers/infiniband/ulp/Makefile5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c105
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h10
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c34
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c89
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c70
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c672
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h94
-rw-r--r--drivers/input/evdev.c18
-rw-r--r--drivers/input/input-polldev.c131
-rw-r--r--drivers/input/keyboard/Kconfig13
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c32
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c145
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c100
-rw-r--r--drivers/input/keyboard/imx_keypad.c2
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c36
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c2
-rw-r--r--drivers/input/keyboard/omap4-keypad.c32
-rw-r--r--drivers/input/keyboard/st-keyscan.c274
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c66
-rw-r--r--drivers/input/misc/88pm860x_onkey.c40
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/ab8500-ponkey.c54
-rw-r--r--drivers/input/misc/gpio-beeper.c29
-rw-r--r--drivers/input/misc/ims-pcu.c1
-rw-r--r--drivers/input/misc/max8925_onkey.c55
-rw-r--r--drivers/input/misc/max8997_haptic.c18
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c8
-rw-r--r--drivers/input/misc/rotary_encoder.c2
-rw-r--r--drivers/input/misc/soc_button_array.c1
-rw-r--r--drivers/input/misc/twl6040-vibra.c83
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/elantech.c27
-rw-r--r--drivers/input/mouse/synaptics.c19
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/olpc_apsp.c2
-rw-r--r--drivers/input/tablet/wacom_sys.c19
-rw-r--r--drivers/input/tablet/wacom_wac.c92
-rw-r--r--drivers/input/tablet/wacom_wac.h6
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c41
-rw-r--r--drivers/input/touchscreen/Kconfig18
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7877.c5
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c862
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c2
-rw-r--r--drivers/input/touchscreen/da9034-ts.c39
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c2
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c47
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c2
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c83
-rw-r--r--drivers/input/touchscreen/mms114.c4
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c45
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c281
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c339
-rw-r--r--drivers/input/touchscreen/tsc2005.c157
-rw-r--r--drivers/input/touchscreen/zforce_ts.c2
-rw-r--r--drivers/iommu/amd_iommu_v2.c18
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c17
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c2
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/capi/Kconfig18
-rw-r--r--drivers/isdn/capi/capi.c4
-rw-r--r--drivers/isdn/capi/capidrv.c195
-rw-r--r--drivers/isdn/capi/capiutil.c200
-rw-r--r--drivers/isdn/hisax/Kconfig11
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c111
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c4
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c14
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/dell-led.c171
-rw-r--r--drivers/leds/leds-88pm860x.c5
-rw-r--r--drivers/leds/leds-adp5520.c5
-rw-r--r--drivers/leds/leds-bd2802.c4
-rw-r--r--drivers/leds/leds-da903x.c4
-rw-r--r--drivers/leds/leds-da9052.c3
-rw-r--r--drivers/leds/leds-lp5523.c3
-rw-r--r--drivers/leds/leds-pca9685.c213
-rw-r--r--drivers/leds/leds-pwm.c147
-rw-r--r--drivers/leds/leds-s3c24xx.c4
-rw-r--r--drivers/leds/leds-sunfire.c4
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c2
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/macintosh/windfarm_pm121.c16
-rw-r--r--drivers/md/bitmap.c6
-rw-r--r--drivers/md/dm-bio-prison.c70
-rw-r--r--drivers/md/dm-bio-prison.h2
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-era-target.c3
-rw-r--r--drivers/md/dm-io.c22
-rw-r--r--drivers/md/dm-mpath.c16
-rw-r--r--drivers/md/dm-snap.c67
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin.c93
-rw-r--r--drivers/md/dm-zero.c4
-rw-r--r--drivers/md/dm.c101
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/raid5.c158
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/omap3isp/Makefile2
-rw-r--r--drivers/media/platform/omap3isp/isp.c108
-rw-r--r--drivers/media/platform/omap3isp/isp.h8
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c107
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.h16
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c4
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c4
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c8
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c1161
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.h188
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c8
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c197
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c325
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h29
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c24
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c1
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptctl.c6
-rw-r--r--drivers/message/fusion/mptfc.c12
-rw-r--r--drivers/message/fusion/mptsas.c10
-rw-r--r--drivers/message/fusion/mptscsih.c8
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/message/fusion/mptspi.c12
-rw-r--r--drivers/mfd/Kconfig5
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/twl4030-power.c286
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/vexpress-syscfg.c12
-rw-r--r--drivers/misc/vmw_balloon.c3
-rw-r--r--drivers/mmc/core/bus.c9
-rw-r--r--drivers/mmc/core/core.c52
-rw-r--r--drivers/mmc/core/debugfs.c8
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc.c678
-rw-r--r--drivers/mmc/core/sd.c28
-rw-r--r--drivers/mmc/core/sd.h1
-rw-r--r--drivers/mmc/core/sdio.c53
-rw-r--r--drivers/mmc/core/sdio_bus.c14
-rw-r--r--drivers/mmc/core/sdio_irq.c41
-rw-r--r--drivers/mmc/core/slot-gpio.c4
-rw-r--r--drivers/mmc/host/Kconfig27
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c14
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c7
-rw-r--r--drivers/mmc/host/dw_mmc.c180
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/jz4740_mmc.c11
-rw-r--r--drivers/mmc/host/mmc_spi.c18
-rw-r--r--drivers/mmc/host/mmci.c6
-rw-r--r--drivers/mmc/host/moxart-mmc.c730
-rw-r--r--drivers/mmc/host/mvsdio.c22
-rw-r--r--drivers/mmc/host/mxcmmc.c140
-rw-r--r--drivers/mmc/host/mxs-mmc.c7
-rw-r--r--drivers/mmc/host/omap.c10
-rw-r--r--drivers/mmc/host/omap_hsmmc.c61
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c5
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c5
-rw-r--r--drivers/mmc/host/sdhci-acpi.c8
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c4
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c4
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c13
-rw-r--r--drivers/mmc/host/sdhci-dove.c78
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c83
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h4
-rw-r--r--drivers/mmc/host/sdhci-msm.c4
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c4
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c70
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c78
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.h3
-rw-r--r--drivers/mmc/host/sdhci-pci.c9
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c4
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c14
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c13
-rw-r--r--drivers/mmc/host/sdhci-s3c.c138
-rw-r--r--drivers/mmc/host/sdhci-sirf.c4
-rw-r--r--drivers/mmc/host/sdhci-spear.c5
-rw-r--r--drivers/mmc/host/sdhci-tegra.c67
-rw-r--r--drivers/mmc/host/sdhci.c749
-rw-r--r--drivers/mmc/host/sdhci.h20
-rw-r--r--drivers/mmc/host/sh_mmcif.c9
-rw-r--r--drivers/mmc/host/usdhi6rol0.c1847
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c2
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/Kconfig16
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c4
-rw-r--r--drivers/mtd/chips/cfi_util.c2
-rw-r--r--drivers/mtd/devices/Kconfig4
-rw-r--r--drivers/mtd/devices/docg3.c4
-rw-r--r--drivers/mtd/devices/elm.c38
-rw-r--r--drivers/mtd/devices/m25p80.c1305
-rw-r--r--drivers/mtd/devices/serial_flash_cmds.h44
-rw-r--r--drivers/mtd/devices/slram.c4
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c340
-rw-r--r--drivers/mtd/lpddr/Kconfig13
-rw-r--r--drivers/mtd/lpddr/Makefile1
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c507
-rw-r--r--drivers/mtd/maps/Kconfig4
-rw-r--r--drivers/mtd/maps/sc520cdp.c6
-rw-r--r--drivers/mtd/maps/solutionengine.c25
-rw-r--r--drivers/mtd/mtd_blkdevs.c6
-rw-r--r--drivers/mtd/mtdchar.c20
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c13
-rw-r--r--drivers/mtd/nand/denali.c7
-rw-r--r--drivers/mtd/nand/docg4.c6
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c14
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c21
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h12
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c11
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c72
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h32
-rw-r--r--drivers/mtd/nand/nand_base.c104
-rw-r--r--drivers/mtd/nand/nand_bbt.c13
-rw-r--r--drivers/mtd/nand/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/omap2.c108
-rw-r--r--drivers/mtd/nand/orion_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c44
-rw-r--r--drivers/mtd/nand/r852.c6
-rw-r--r--drivers/mtd/onenand/samsung.c8
-rw-r--r--drivers/mtd/spi-nor/Kconfig17
-rw-r--r--drivers/mtd/spi-nor/Makefile2
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c1009
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c1107
-rw-r--r--drivers/mtd/tests/oobtest.c17
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c4
-rw-r--r--drivers/net/bonding/bond_3ad.c62
-rw-r--r--drivers/net/bonding/bond_alb.c153
-rw-r--r--drivers/net/bonding/bond_alb.h1
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c227
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c66
-rw-r--r--drivers/net/bonding/bond_options.h2
-rw-r--r--drivers/net/bonding/bond_procfs.c16
-rw-r--r--drivers/net/bonding/bond_sysfs.c567
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/bonding/bonding.h143
-rw-r--r--drivers/net/can/Kconfig30
-rw-r--r--drivers/net/can/Makefile4
-rw-r--r--drivers/net/can/c_can/c_can.c15
-rw-r--r--drivers/net/can/c_can/c_can.h8
-rw-r--r--drivers/net/can/c_can/c_can_pci.c78
-rw-r--r--drivers/net/can/c_can/c_can_platform.c84
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/rcar_can.c876
-rw-r--r--drivers/net/can/slcan.c37
-rw-r--r--drivers/net/can/softing/softing_main.c20
-rw-r--r--drivers/net/can/spi/Kconfig10
-rw-r--r--drivers/net/can/spi/Makefile8
-rw-r--r--drivers/net/can/spi/mcp251x.c (renamed from drivers/net/can/mcp251x.c)95
-rw-r--r--drivers/net/can/usb/Kconfig12
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/gs_usb.c971
-rw-r--r--drivers/net/can/usb/kvaser_usb.c53
-rw-r--r--drivers/net/can/xilinx_can.c1208
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c2
-rw-r--r--drivers/net/dsa/mv88e6131.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c4
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c1
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c1
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c2
-rw-r--r--drivers/net/ethernet/amd/Kconfig14
-rw-r--r--drivers/net/ethernet/amd/Makefile1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c3
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/hplance.c4
-rw-r--r--drivers/net/ethernet/amd/mvme147.c6
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h1007
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c375
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c556
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2182
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c1351
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c510
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c512
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c433
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h676
-rw-r--r--drivers/net/ethernet/arc/emac_main.c49
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c6
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1654
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h678
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c49
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h4
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c29
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c21
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c110
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c6
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c279
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h32
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c67
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c323
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h9
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c66
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c6
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c20
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h19
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c610
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h87
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c194
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c585
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fec.h13
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c667
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c22
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c14
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig27
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile5
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c1066
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h41
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c88
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h137
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c141
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c311
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1124
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c131
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h53
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c209
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c61
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h164
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h12
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c84
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h67
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c423
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c185
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c62
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c121
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h100
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h96
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c134
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c56
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h47
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c48
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h49
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c66
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h48
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h58
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h49
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c151
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c47
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c203
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c60
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c89
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c85
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c80
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c96
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c356
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c68
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c164
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c66
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h62
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c376
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c18
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c324
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c7
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c311
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c87
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c5
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c22
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c16
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h36
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c44
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c42
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c92
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c69
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c192
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c171
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c47
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c50
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c10
-rw-r--r--drivers/net/ethernet/sfc/io.h7
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c3
-rw-r--r--drivers/net/ethernet/sfc/tx.c22
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c17
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c62
-rw-r--r--drivers/net/ethernet/ti/cpsw.c121
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c39
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c9
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c50
-rw-r--r--drivers/net/ethernet/tile/tilegx.c14
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c511
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h163
-rw-r--r--drivers/net/hyperv/netvsc.c529
-rw-r--r--drivers/net/hyperv/netvsc_drv.c129
-rw-r--r--drivers/net/hyperv/rndis_filter.c193
-rw-r--r--drivers/net/ieee802154/at86rf230.c136
-rw-r--r--drivers/net/ieee802154/fakelb.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c33
-rw-r--r--drivers/net/irda/Kconfig3
-rw-r--r--drivers/net/irda/via-ircc.c7
-rw-r--r--drivers/net/irda/w83977af_ir.c33
-rw-r--r--drivers/net/macvlan.c262
-rw-r--r--drivers/net/ntb_netdev.c3
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c1357
-rw-r--r--drivers/net/phy/at803x.c234
-rw-r--r--drivers/net/phy/fixed.c81
-rw-r--r--drivers/net/phy/mdio_bus.c73
-rw-r--r--drivers/net/phy/micrel.c106
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c50
-rw-r--r--drivers/net/phy/realtek.c88
-rw-r--r--drivers/net/phy/smsc.c3
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/slip/slip.c36
-rw-r--r--drivers/net/slip/slip.h1
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/team/team_mode_loadbalance.c12
-rw-r--r--drivers/net/tun.c54
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c129
-rw-r--r--drivers/net/usb/cdc_ncm.c740
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c20
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c6
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/virtio_net.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c26
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h5
-rw-r--r--drivers/net/vxlan.c205
-rw-r--r--drivers/net/wan/farsync.c31
-rw-r--r--drivers/net/wan/sdla.c4
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c7
-rw-r--r--drivers/net/wireless/at76c50x-usb.c180
-rw-r--r--drivers/net/wireless/at76c50x-usb.h26
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c383
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c366
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h26
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c109
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h37
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c587
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c990
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c336
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c183
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c90
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig30
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c31
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c253
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h72
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c214
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c555
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c163
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c8
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c45
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c43
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c28
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c36
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h50
-rw-r--r--drivers/net/wireless/b43/Kconfig42
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/bus.h10
-rw-r--r--drivers/net/wireless/b43/main.c499
-rw-r--r--drivers/net/wireless/b43/phy_common.c96
-rw-r--r--drivers/net/wireless/b43/phy_common.h8
-rw-r--r--drivers/net/wireless/b43/phy_g.c6
-rw-r--r--drivers/net/wireless/b43/phy_n.c321
-rw-r--r--drivers/net/wireless/b43/phy_n.h1
-rw-r--r--drivers/net/wireless/b43/radio_2056.c1336
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c150
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h3
-rw-r--r--drivers/net/wireless/b43/wa.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c283
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c332
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/nvram.h)24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c80
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.c94
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c270
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c213
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/d11.c93
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_d11.h14
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h1
-rw-r--r--drivers/net/wireless/cw1200/sta.c3
-rw-r--r--drivers/net/wireless/cw1200/sta.h3
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c3
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c19
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c39
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c29
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h)32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h60
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c114
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c129
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c61
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h46
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h54
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c204
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h60
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c102
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c97
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c410
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c491
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c189
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c71
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c87
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c14
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h33
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c86
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c137
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c197
-rw-r--r--drivers/net/wireless/libertas/cfg.c7
-rw-r--r--drivers/net/wireless/libertas/defs.h3
-rw-r--r--drivers/net/wireless/libertas/rx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c45
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c28
-rw-r--r--drivers/net/wireless/mwifiex/README7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c19
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c25
-rw-r--r--drivers/net/wireless/mwifiex/decl.h8
-rw-r--r--drivers/net/wireless/mwifiex/fw.h25
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h2
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c8
-rw-r--r--drivers/net/wireless/mwifiex/scan.c66
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c15
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h18
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c7
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c18
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c44
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c16
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c4
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c97
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/mwifiex/usb.c55
-rw-r--r--drivers/net/wireless/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/mwifiex/util.h43
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c22
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h5
-rw-r--r--drivers/net/wireless/orinoco/hw.c4
-rw-r--r--drivers/net/wireless/orinoco/hw.h4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/orinoco/wext.c4
-rw-r--r--drivers/net/wireless/p54/main.c3
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c27
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h6
-rw-r--r--drivers/net/wireless/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c1
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c68
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c44
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c28
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c69
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h4
-rw-r--r--drivers/net/xen-netback/common.h108
-rw-r--r--drivers/net/xen-netback/interface.c496
-rw-r--r--drivers/net/xen-netback/netback.c754
-rw-r--r--drivers/net/xen-netback/xenbus.c182
-rw-r--r--drivers/net/xen-netfront.c1164
-rw-r--r--drivers/nfc/Kconfig1
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/pn544/i2c.c154
-rw-r--r--drivers/nfc/port100.c36
-rw-r--r--drivers/nfc/st21nfca/Kconfig23
-rw-r--r--drivers/nfc/st21nfca/Makefile8
-rw-r--r--drivers/nfc/st21nfca/i2c.c724
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c698
-rw-r--r--drivers/nfc/st21nfca/st21nfca.h87
-rw-r--r--drivers/nfc/trf7970a.c261
-rw-r--r--drivers/of/base.c7
-rw-r--r--drivers/of/fdt.c15
-rw-r--r--drivers/of/of_mdio.c208
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/pci/access.c5
-rw-r--r--drivers/pci/bus.c6
-rw-r--r--drivers/pci/host/pci-exynos.c1
-rw-r--r--drivers/pci/host/pci-imx6.c1
-rw-r--r--drivers/pci/host/pci-mvebu.c31
-rw-r--r--drivers/pci/host/pcie-designware.c6
-rw-r--r--drivers/pci/host/pcie-designware.h1
-rw-r--r--drivers/pci/host/pcie-rcar.c10
-rw-r--r--drivers/pci/hotplug/acpiphp.h10
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c6
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c66
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h18
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c31
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c18
-rw-r--r--drivers/pci/hotplug/cpcihp_generic.c8
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c2
-rw-r--r--drivers/pci/hotplug/cpqphp.h4
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c12
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c26
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c32
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c26
-rw-r--r--drivers/pci/hotplug/cpqphp_sysfs.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c44
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c10
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c6
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c36
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c15
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c106
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c4
-rw-r--r--drivers/pci/hotplug/pciehp_core.c7
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c16
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c52
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c5
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c38
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c13
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c35
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c3
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c36
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c29
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c6
-rw-r--r--drivers/pci/hotplug/shpchp_sysfs.c2
-rw-r--r--drivers/pci/htirq.c7
-rw-r--r--drivers/pci/msi.c5
-rw-r--r--drivers/pci/pci-driver.c69
-rw-r--r--drivers/pci/pci-label.c65
-rw-r--r--drivers/pci/pci-stub.c2
-rw-r--r--drivers/pci/pci-sysfs.c299
-rw-r--r--drivers/pci/pci.c225
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c3
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c10
-rw-r--r--drivers/pci/pcie/pme.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c6
-rw-r--r--drivers/pci/probe.c60
-rw-r--r--drivers/pci/proc.c16
-rw-r--r--drivers/pci/quirks.c232
-rw-r--r--drivers/pci/rom.c13
-rw-r--r--drivers/pci/search.c118
-rw-r--r--drivers/pci/setup-bus.c51
-rw-r--r--drivers/pci/setup-irq.c13
-rw-r--r--drivers/pci/setup-res.c13
-rw-r--r--drivers/pci/syscall.c2
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-core.c7
-rw-r--r--drivers/phy/phy-omap-usb2.c11
-rw-r--r--drivers/phy/phy-samsung-usb2.c1
-rw-r--r--drivers/pinctrl/berlin/berlin.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c33
-rw-r--r--drivers/platform/x86/Kconfig31
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/alienware-wmi.c121
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/dell-smo8800.c233
-rw-r--r--drivers/platform/x86/hp-wmi.c18
-rw-r--r--drivers/platform/x86/ideapad-laptop.c31
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c7
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c4
-rw-r--r--drivers/platform/x86/pvpanic.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c38
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c30
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c8
-rw-r--r--drivers/pwm/pwm-ab8500.c13
-rw-r--r--drivers/pwm/pwm-atmel.c1
-rw-r--r--drivers/pwm/pwm-bcm-kona.c318
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c1
-rw-r--r--drivers/pwm/pwm-imx.c4
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpss.c161
-rw-r--r--drivers/pwm/pwm-mxs.c1
-rw-r--r--drivers/pwm/pwm-pxa.c4
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c23
-rw-r--r--drivers/pwm/pwm-samsung.c3
-rw-r--r--drivers/pwm/pwm-spear.c6
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c10
-rw-r--r--drivers/pwm/pwm-twl.c8
-rw-r--r--drivers/pwm/pwm-vt8500.c4
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/bcm590xx-regulator.c5
-rw-r--r--drivers/regulator/ltc3589.c2
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/tps65218-regulator.c3
-rw-r--r--drivers/regulator/virtual.c10
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/rtc/Kconfig4
-rw-r--r--drivers/rtc/rtc-puv3.c4
-rw-r--r--drivers/rtc/rtc-s5m.c310
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/sclp_vt220.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/vmwatchdog.c338
-rw-r--r--drivers/s390/cio/airq.c13
-rw-r--r--drivers/s390/cio/ccwgroup.c28
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/device.c71
-rw-r--r--drivers/s390/cio/qdio_debug.c79
-rw-r--r--drivers/s390/cio/qdio_debug.h2
-rw-r--r--drivers/s390/cio/qdio_main.c16
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/kvm/virtio_ccw.c49
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_sysfs.c14
-rw-r--r--drivers/s390/net/lcs.c15
-rw-r--r--drivers/s390/net/qeth_core.h6
-rw-r--r--drivers/s390/net/qeth_core_main.c100
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c18
-rw-r--r--drivers/s390/net/qeth_l3_main.c21
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/NCR5380.c137
-rw-r--r--drivers/scsi/NCR5380.h32
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c18
-rw-r--r--drivers/scsi/arm/acornscsi.c53
-rw-r--r--drivers/scsi/arm/cumana_1.c3
-rw-r--r--drivers/scsi/arm/oak.c3
-rw-r--r--drivers/scsi/atari_NCR5380.c193
-rw-r--r--drivers/scsi/atari_scsi.c24
-rw-r--r--drivers/scsi/atari_scsi.h119
-rw-r--r--drivers/scsi/be2iscsi/be.h11
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h31
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c12
-rw-r--r--drivers/scsi/be2iscsi/be_main.c84
-rw-r--r--drivers/scsi/be2iscsi/be_main.h7
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c68
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h2
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c64
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/dtc.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fnic/fnic.h5
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c238
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c61
-rw-r--r--drivers/scsi/fnic/fnic_main.c23
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c13
-rw-r--r--drivers/scsi/fnic/fnic_trace.c326
-rw-r--r--drivers/scsi/fnic/fnic_trace.h38
-rw-r--r--drivers/scsi/g_NCR5380.c4
-rw-r--r--drivers/scsi/g_NCR5380.h7
-rw-r--r--drivers/scsi/hpsa.c297
-rw-r--r--drivers/scsi/hpsa.h43
-rw-r--r--drivers/scsi/hpsa_cmd.h49
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c13
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c258
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c318
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/mac_scsi.c10
-rw-r--r--drivers/scsi/mac_scsi.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c15
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c24
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c24
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c10
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h58
-rw-r--r--drivers/scsi/mvsas/mv_init.c9
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/pas16.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c49
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c127
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h60
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c148
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c87
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c510
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h50
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c105
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c867
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h98
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c91
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c47
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h5
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c106
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.h44
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c32
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c458
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c41
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c8
-rw-r--r--drivers/scsi/scsi_error.c30
-rw-r--r--drivers/scsi/scsi_lib.c227
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/sd.c53
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/sun3_NCR5380.c195
-rw-r--r--drivers/scsi/sun3_scsi.c241
-rw-r--r--drivers/scsi/sun3_scsi.h199
-rw-r--r--drivers/scsi/sun3_scsi_vme.c588
-rw-r--r--drivers/scsi/t128.c4
-rw-r--r--drivers/scsi/t128.h7
-rw-r--r--drivers/scsi/ufs/ufs.h36
-rw-r--r--drivers/scsi/ufs/ufshcd.c722
-rw-r--r--drivers/scsi/ufs/ufshcd.h22
-rw-r--r--drivers/scsi/ufs/ufshci.h32
-rw-r--r--drivers/scsi/virtio_scsi.c199
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c8
-rw-r--r--drivers/spi/spi-qup.c44
-rw-r--r--drivers/spi/spi-sh-sci.c4
-rw-r--r--drivers/staging/android/timed_output.c1
-rw-r--r--drivers/staging/comedi/Kconfig1
-rw-r--r--drivers/staging/et131x/et131x.c2
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c2
-rw-r--r--drivers/staging/iio/Kconfig9
-rw-r--r--drivers/staging/iio/adc/ad7291.c4
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c12
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c8
-rw-r--r--drivers/staging/imx-drm/Kconfig11
-rw-r--r--drivers/staging/imx-drm/Makefile1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c9
-rw-r--r--drivers/staging/imx-drm/imx-drm.h2
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c3
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c1
-rw-r--r--drivers/staging/imx-drm/imx-tve.c7
-rw-r--r--drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h331
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c2
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c2
-rw-r--r--drivers/staging/imx-drm/parallel-display.c8
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h11
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c112
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c102
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c29
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/netlogic/xlr_net.c2
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/rtl8192ee/core.c4
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c2
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c8
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c4
-rw-r--r--drivers/staging/rtl8821ae/core.c4
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c6
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c31
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c88
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c2
-rw-r--r--drivers/target/loopback/tcm_loop.c16
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/target/target_core_pscsi.c3
-rw-r--r--drivers/target/target_core_sbc.c68
-rw-r--r--drivers/target/target_core_spc.c18
-rw-r--r--drivers/target/target_core_transport.c37
-rw-r--r--drivers/target/target_core_xcopy.c10
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c19
-rw-r--r--drivers/target/tcm_fc/tfc_io.c17
-rw-r--r--drivers/tc/tc.c10
-rw-r--r--drivers/thermal/Kconfig14
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/armada_thermal.c158
-rw-r--r--drivers/thermal/imx_thermal.c18
-rw-r--r--drivers/thermal/int3403_thermal.c8
-rw-r--r--drivers/thermal/intel_powerclamp.c2
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c479
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/rcar_thermal.c9
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c86
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h23
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c211
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h31
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/thermal_hwmon.c33
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c4
-rw-r--r--drivers/tty/hvc/hvc_tile.c2
-rw-r--r--drivers/tty/n_tty.c19
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c5
-rw-r--r--drivers/tty/serial/altera_uart.c6
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/bfin_uart.c2
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/earlycon.c2
-rw-r--r--drivers/tty/serial/efm32-uart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c2
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/tty/serial/ip22zilog.c4
-rw-r--r--drivers/tty/serial/m32r_sio.c10
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mcf.c6
-rw-r--r--drivers/tty/serial/mfd.c2
-rw-r--r--drivers/tty/serial/mpsc.c2
-rw-r--r--drivers/tty/serial/msm_serial.c4
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/netx-serial.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c5
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c2
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/samsung.c2
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sccnxp.c2
-rw-r--r--drivers/tty/serial/serial_ks8695.c2
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c2
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/sunsab.c5
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c4
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/vr41xx_siu.c2
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/vt/vt.c24
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/chipidea/udc.c7
-rw-r--r--drivers/usb/core/hub.c33
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/port.c89
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c17
-rw-r--r--drivers/usb/dwc3/gadget.c8
-rw-r--r--drivers/usb/gadget/configfs.c37
-rw-r--r--drivers/usb/gadget/configfs.h1
-rw-r--r--drivers/usb/gadget/f_fs.c12
-rw-r--r--drivers/usb/gadget/f_rndis.c6
-rw-r--r--drivers/usb/gadget/gr_udc.c5
-rw-r--r--drivers/usb/gadget/inode.c7
-rw-r--r--drivers/usb/gadget/storage_common.c4
-rw-r--r--drivers/usb/gadget/u_ether.c7
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c40
-rw-r--r--drivers/usb/host/pci-quirks.c19
-rw-r--r--drivers/usb/host/xhci-hub.c7
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/misc/usbtest.c16
-rw-r--r--drivers/usb/musb/musb_am335x.c23
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c9
-rw-r--r--drivers/usb/musb/ux500.c1
-rw-r--r--drivers/usb/phy/phy-msm-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c8
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c12
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/option.c28
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vhost/net.c27
-rw-r--r--drivers/vhost/scsi.c346
-rw-r--r--drivers/vhost/test.c11
-rw-r--r--drivers/vhost/vhost.c97
-rw-r--r--drivers/vhost/vhost.h19
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/backlight/gpio_backlight.c3
-rw-r--r--drivers/video/backlight/pwm_bl.c87
-rw-r--r--drivers/video/backlight/s6e63m0.c2
-rw-r--r--drivers/video/console/dummycon.c1
-rw-r--r--drivers/video/console/vgacon.c1
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.c2
-rw-r--r--drivers/video/fbdev/offb.c11
-rw-r--r--drivers/video/fbdev/omap2/dss/omapdss-boot-init.c8
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c2
-rw-r--r--drivers/virtio/virtio_ring.c15
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/watchdog/Kconfig56
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/ath79_wdt.c16
-rw-r--r--drivers/watchdog/booke_wdt.c51
-rw-r--r--drivers/watchdog/diag288_wdt.c316
-rw-r--r--drivers/watchdog/imx2_wdt.c320
-rw-r--r--drivers/watchdog/intel-mid_wdt.c184
-rw-r--r--drivers/watchdog/kempld_wdt.c2
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c213
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/sunxi_wdt.c22
-rw-r--r--drivers/watchdog/via_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c15
-rw-r--r--drivers/watchdog/w83697hf_wdt.c460
-rw-r--r--drivers/watchdog/w83697ug_wdt.c397
-rw-r--r--drivers/xen/grant-table.c3
2164 files changed, 98870 insertions, 40932 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index c67f6f5ad611..36b0e61f9c09 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,6 +30,10 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
@@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
+
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -67,6 +72,12 @@ static int acpi_ac_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
+
static int ac_sleep_before_get_state_ms;
static struct acpi_driver acpi_ac_driver = {
@@ -91,6 +103,16 @@ struct acpi_ac {
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static const struct file_operations acpi_ac_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_ac_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
@@ -143,6 +165,82 @@ static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
+#ifdef CONFIG_ACPI_PROCFS_POWER
+/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_ac_dir;
+
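+/* Emit the AC adapter's on-line/off-line state for the procfs 'state' file */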
+static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_ac *ac = seq->private;
+
+ if (!ac)
+ return 0;
+
+ if (acpi_ac_get_state(ac)) {
+ seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
+ return 0;
+ }
+
+ seq_puts(seq, "state: ");
+ switch (ac->state) {
+ case ACPI_AC_STATUS_OFFLINE:
+ seq_puts(seq, "off-line\n");
+ break;
+ case ACPI_AC_STATUS_ONLINE:
+ seq_puts(seq, "on-line\n");
+ break;
+ default:
+ seq_puts(seq, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int acpi_ac_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
+}
+
+static int acpi_ac_add_fs(struct acpi_ac *ac)
+{
+ struct proc_dir_entry *entry = NULL;
+
+ printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+ " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ if (!acpi_device_dir(ac->device)) {
+ acpi_device_dir(ac->device) =
+ proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
+ if (!acpi_device_dir(ac->device))
+ return -ENODEV;
+ }
+
+ /* 'state' [R] */
+ entry = proc_create_data(ACPI_AC_FILE_STATE,
+ S_IRUGO, acpi_device_dir(ac->device),
+ &acpi_ac_fops, ac);
+ if (!entry)
+ return -ENODEV;
+ return 0;
+}
+
+static int acpi_ac_remove_fs(struct acpi_ac *ac)
+{
+ if (acpi_device_dir(ac->device)) {
+ remove_proc_entry(ACPI_AC_FILE_STATE,
+ acpi_device_dir(ac->device));
+ remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
+ acpi_device_dir(ac->device) = NULL;
+ }
+
+ return 0;
+}
+#endif
+
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
@@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device)
goto end;
ac->charger.name = acpi_device_bid(device);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_ac_add_fs(ac);
+ if (result)
+ goto end;
+#endif
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
@@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device)
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
end:
- if (result)
+ if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_remove_fs(ac);
+#endif
kfree(ac);
+ }
dmi_check_system(ac_dmi_table);
return result;
@@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device)
power_supply_unregister(&ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_remove_fs(ac);
+#endif
+
kfree(ac);
return 0;
@@ -315,9 +427,19 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_ac_dir = acpi_lock_ac_dir();
+ if (!acpi_ac_dir)
+ return -ENODEV;
+#endif
+
result = acpi_bus_register_driver(&acpi_ac_driver);
- if (result < 0)
+ if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
return -ENODEV;
+ }
return 0;
}
@@ -325,6 +448,9 @@ static int __init acpi_ac_init(void)
static void __exit acpi_ac_exit(void)
{
acpi_bus_unregister_driver(&acpi_ac_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
}
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 63407d264885..9cb65b0e7597 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -34,6 +34,9 @@ ACPI_MODULE_NAME("acpi_lpss");
/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
+#define LPSS_RESETS 0x04
+#define LPSS_RESETS_RESET_FUNC BIT(0)
+#define LPSS_RESETS_RESET_APB BIT(1)
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
@@ -99,6 +102,18 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
}
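+/* Deassert the function and APB resets to bring the I2C host out of reset */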
+static void lpss_i2c_setup(struct lpss_private_data *pdata)
+{
+ unsigned int offset;
+ u32 val;
+
+ offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
+ val = readl(pdata->mmio_base + offset);
+ val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
+ writel(val, pdata->mmio_base + offset);
+}
+
static struct lpss_device_desc lpt_dev_desc = {
.clk_required = true,
.prv_offset = 0x800,
@@ -171,6 +185,7 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
.prv_offset = 0x800,
.save_ctx = true,
.shared_clock = &i2c_clock,
+ .setup = lpss_i2c_setup,
};
#else
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 6703c1fd993a..4ddb0dca56f6 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -14,6 +14,8 @@
#include <linux/module.h>
static const struct acpi_device_id acpi_pnp_device_ids[] = {
+ /* soc_button_array */
+ {"PNP0C40"},
/* pata_isapnp */
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
/* floppy */
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index e48fc98e71c4..130f513e08c9 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -32,8 +32,9 @@
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/dmi.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -70,6 +72,7 @@ MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
static int battery_bix_broken_package;
+static int battery_notification_delay_ms;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -532,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
" invalid.\n");
}
+ /*
+ * When fully charged, some batteries wrongly report
+ * capacity_now = design_capacity rather than full_charge_capacity.
+ */
+ if (battery->capacity_now > battery->full_charge_capacity
+ && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
+ battery->capacity_now = battery->full_charge_capacity;
+ if (battery->capacity_now != battery->design_capacity)
+ printk_once(KERN_WARNING FW_BUG
+ "battery: reported current charge level (%d) "
+ "is higher than reported maximum charge level (%d).\n",
+ battery->capacity_now, battery->full_charge_capacity);
+ }
+
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
@@ -930,7 +947,10 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
goto end;
}
alarm_string[count] = '\0';
- battery->alarm = simple_strtol(alarm_string, NULL, 0);
+ if (kstrtoint(alarm_string, 0, &battery->alarm)) {
+ result = -EINVAL;
+ goto end;
+ }
result = acpi_battery_set_alarm(battery);
end:
if (!result)
@@ -1062,6 +1082,14 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
if (!battery)
return;
old = battery->bat.dev;
+ /*
+ * On the Acer Aspire V5-573G, notifications are sometimes triggered
+ * too early: when AC is unplugged and a notification fires, the
+ * battery state is still reported as "Full" and only changes to
+ * "Discharging" after a short delay, without a further notification.
+ */
+ if (battery_notification_delay_ms > 0)
+ msleep(battery_notification_delay_ms);
if (event == ACPI_BATTERY_NOTIFY_INFO)
acpi_battery_refresh(battery);
acpi_battery_update(battery, false);
@@ -1106,17 +1134,60 @@ static int battery_notify(struct notifier_block *nb,
return 0;
}
+static int battery_bix_broken_package_quirk(const struct dmi_system_id *d)
+{
+ battery_bix_broken_package = 1;
+ return 0;
+}
+
+static int battery_notification_delay_quirk(const struct dmi_system_id *d)
+{
+ battery_notification_delay_ms = 1000;
+ return 0;
+}
+
static struct dmi_system_id bat_dmi_table[] = {
{
+ .callback = battery_bix_broken_package_quirk,
.ident = "NEC LZ750/LS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
},
},
+ {
+ .callback = battery_notification_delay_quirk,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
+ },
+ },
{},
};
+/*
+ * Some machines' ECs (e.g. the Lenovo Z480's) are not stable
+ * during boot-up, which sometimes causes the battery driver to
+ * fail probing because battery information cannot be read from
+ * the EC. After several retries the operation may succeed, so
+ * retry the battery update here, with a 20ms sleep between
+ * each attempt.
+ */
+static int acpi_battery_update_retry(struct acpi_battery *battery)
+{
+ int retry, ret;
+
+ for (retry = 5; retry; retry--) {
+ ret = acpi_battery_update(battery, false);
+ if (!ret)
+ break;
+
+ msleep(20);
+ }
+ return ret;
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1135,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device)
mutex_init(&battery->sysfs_lock);
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- result = acpi_battery_update(battery, false);
+
+ result = acpi_battery_update_retry(battery);
if (result)
goto fail;
+
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
@@ -1227,8 +1300,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
if (acpi_disabled)
return;
- if (dmi_check_system(bat_dmi_table))
- battery_bix_broken_package = 1;
+ dmi_check_system(bat_dmi_table);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_dir = acpi_lock_battery_dir();
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ad11ba4a412d..a66ab658abbc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,11 +1,14 @@
/*
- * ec.c - ACPI Embedded Controller Driver (v2.1)
+ * ec.c - ACPI Embedded Controller Driver (v2.2)
*
- * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
- * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
- * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001-2014 Intel Corporation
+ * Author: 2014 Lv Zheng <lv.zheng@intel.com>
+ * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
+ * 2004 Luming Yu <luming.yu@intel.com>
+ * 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -52,6 +55,7 @@
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
+#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
@@ -78,6 +82,9 @@ enum {
EC_FLAGS_BLOCKED, /* Transactions are blocked */
};
+#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
+#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
+
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
@@ -109,7 +116,7 @@ struct transaction {
u8 ri;
u8 wlen;
u8 rlen;
- bool done;
+ u8 flags;
};
struct acpi_ec *boot_ec, *first_ec;
@@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
- pr_debug("---> status = 0x%2.2x\n", x);
+ pr_debug("EC_SC(R) = 0x%2.2x "
+ "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
+ x,
+ !!(x & ACPI_EC_FLAG_SCI),
+ !!(x & ACPI_EC_FLAG_BURST),
+ !!(x & ACPI_EC_FLAG_CMD),
+ !!(x & ACPI_EC_FLAG_IBF),
+ !!(x & ACPI_EC_FLAG_OBF));
return x;
}
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
- pr_debug("---> data = 0x%2.2x\n", x);
+ pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
- pr_debug("<--- command = 0x%2.2x\n", command);
+ pr_debug("EC_SC(W) = 0x%2.2x\n", command);
outb(command, ec->command_addr);
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
- pr_debug("<--- data = 0x%2.2x\n", data);
+ pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
outb(data, ec->data_addr);
}
-static int ec_transaction_done(struct acpi_ec *ec)
+static int ec_transaction_completed(struct acpi_ec *ec)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ec->lock, flags);
- if (!ec->curr || ec->curr->done)
+ if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
-static void start_transaction(struct acpi_ec *ec)
+static bool advance_transaction(struct acpi_ec *ec)
{
- ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
- ec->curr->done = false;
- acpi_ec_write_cmd(ec, ec->curr->command);
-}
-
-static void advance_transaction(struct acpi_ec *ec, u8 status)
-{
- unsigned long flags;
struct transaction *t;
+ u8 status;
+ bool wakeup = false;
- spin_lock_irqsave(&ec->lock, flags);
+ pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
+ status = acpi_ec_read_status(ec);
t = ec->curr;
if (!t)
- goto unlock;
- if (t->wlen > t->wi) {
- if ((status & ACPI_EC_FLAG_IBF) == 0)
- acpi_ec_write_data(ec,
- t->wdata[t->wi++]);
- else
- goto err;
- } else if (t->rlen > t->ri) {
- if ((status & ACPI_EC_FLAG_OBF) == 1) {
- t->rdata[t->ri++] = acpi_ec_read_data(ec);
- if (t->rlen == t->ri)
- t->done = true;
+ goto err;
+ if (t->flags & ACPI_EC_COMMAND_POLL) {
+ if (t->wlen > t->wi) {
+ if ((status & ACPI_EC_FLAG_IBF) == 0)
+ acpi_ec_write_data(ec, t->wdata[t->wi++]);
+ else
+ goto err;
+ } else if (t->rlen > t->ri) {
+ if ((status & ACPI_EC_FLAG_OBF) == 1) {
+ t->rdata[t->ri++] = acpi_ec_read_data(ec);
+ if (t->rlen == t->ri) {
+ t->flags |= ACPI_EC_COMMAND_COMPLETE;
+ wakeup = true;
+ }
+ } else
+ goto err;
+ } else if (t->wlen == t->wi &&
+ (status & ACPI_EC_FLAG_IBF) == 0) {
+ t->flags |= ACPI_EC_COMMAND_COMPLETE;
+ wakeup = true;
+ }
+ return wakeup;
+ } else {
+ if ((status & ACPI_EC_FLAG_IBF) == 0) {
+ acpi_ec_write_cmd(ec, t->command);
+ t->flags |= ACPI_EC_COMMAND_POLL;
} else
goto err;
- } else if (t->wlen == t->wi &&
- (status & ACPI_EC_FLAG_IBF) == 0)
- t->done = true;
- goto unlock;
+ return wakeup;
+ }
err:
/*
	 * If the SCI bit is set, don't assume this is a spurious IRQ;
	 * otherwise an unhandled IRQ would be miscounted as a false one.
*/
- if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
- ++t->irq_count;
+ if (!(status & ACPI_EC_FLAG_SCI)) {
+ if (in_interrupt() && t)
+ ++t->irq_count;
+ }
+ return wakeup;
+}
-unlock:
- spin_unlock_irqrestore(&ec->lock, flags);
+static void start_transaction(struct acpi_ec *ec)
+{
+ ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+ ec->curr->flags = 0;
+ (void)advance_transaction(ec);
}
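
To see how the flag pair replaces the old "done" boolean, trace a one-byte EC read (command byte, one address byte written, one data byte read back):

/*
 * Illustrative progression; every advance_transaction() call runs
 * under ec->lock, driven by the GPE handler or by ec_poll():
 *
 *   start_transaction()      flags = 0; the embedded first advance
 *                            writes the command (IBF clear) -> POLL
 *   advance #2 (IBF clear)   wdata[0], the address byte, is written
 *   advance #3 (OBF set)     rdata[0] is read; rlen == ri, so
 *                            COMPLETE is set and the waiter is woken
 */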
static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
@@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
/* don't sleep with disabled interrupts */
if (EC_FLAGS_MSI || irqs_disabled()) {
udelay(ACPI_EC_MSI_UDELAY);
- if (ec_transaction_done(ec))
+ if (ec_transaction_completed(ec))
return 0;
} else {
if (wait_event_timeout(ec->wait,
- ec_transaction_done(ec),
+ ec_transaction_completed(ec),
msecs_to_jiffies(1)))
return 0;
}
- advance_transaction(ec, acpi_ec_read_status(ec));
+ spin_lock_irqsave(&ec->lock, flags);
+ (void)advance_transaction(ec);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
@@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
return ret;
}
-static int ec_check_ibf0(struct acpi_ec *ec)
-{
- u8 status = acpi_ec_read_status(ec);
- return (status & ACPI_EC_FLAG_IBF) == 0;
-}
-
-static int ec_wait_ibf0(struct acpi_ec *ec)
-{
- unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
- /* interrupt wait manually if GPE mode is not active */
- while (time_before(jiffies, delay))
- if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
- msecs_to_jiffies(1)))
- return 0;
- return -ETIME;
-}
-
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
int status;
@@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
goto unlock;
}
}
- if (ec_wait_ibf0(ec)) {
- pr_err("input buffer is not empty, "
- "aborting transaction\n");
- status = -ETIME;
- goto end;
- }
pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
@@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
pr_debug("transaction end\n");
-end:
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
@@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, void *data)
{
+ unsigned long flags;
struct acpi_ec *ec = data;
- u8 status = acpi_ec_read_status(ec);
- pr_debug("~~~> interrupt, status:0x%02x\n", status);
-
- advance_transaction(ec, status);
- if (ec_transaction_done(ec) &&
- (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+ spin_lock_irqsave(&ec->lock, flags);
+ if (advance_transaction(ec))
wake_up(&ec->wait);
- ec_check_sci(ec, acpi_ec_read_status(ec));
- }
+ spin_unlock_irqrestore(&ec->lock, flags);
+ ec_check_sci(ec, acpi_ec_read_status(ec));
return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
@@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
/* fall through */
}
- if (EC_FLAGS_SKIP_DSDT_SCAN)
+ if (EC_FLAGS_SKIP_DSDT_SCAN) {
+ kfree(saved_ec);
return -ENODEV;
+ }
/* This workaround is needed only on some broken machines,
* which require early EC, but fail to provide ECDT */
@@ -1105,6 +1110,7 @@ install:
}
error:
kfree(boot_ec);
+ kfree(saved_ec);
boot_ec = NULL;
return -ENODEV;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 147bc1b91b42..bad25b070fe0 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -235,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args)
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
- acpi_rsdp = simple_strtoul(arg, NULL, 16);
+ if (kstrtoul(arg, 16, &acpi_rsdp))
+ return -EINVAL;
return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
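
The same strict-parsing conversion as in battery.c, shown as a self-contained early_param handler (parameter name and variable are hypothetical):

static unsigned long example_base;

static int __init setup_example_base(char *arg)
{
	/* reject the whole parameter if the hex value is malformed */
	if (kstrtoul(arg, 16, &example_base))
		return -EINVAL;
	return 0;
}
early_param("example_base", setup_example_base);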
@@ -1810,6 +1811,16 @@ acpi_status __init acpi_os_initialize(void)
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+ if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
+ /*
+ * Use acpi_os_map_generic_address to pre-map the reset
+ * register if it's in system memory.
+ */
+ int rv;
+
+ rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
+ pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
+ }
return AE_OK;
}
@@ -1838,6 +1849,8 @@ acpi_status acpi_os_terminate(void)
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+ if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 0bdacc5e26a3..2ba8f02ced36 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
- if (!memory24->address_length)
+ if (!memory24->minimum && !memory24->address_length)
return false;
acpi_dev_get_memresource(res, memory24->minimum,
memory24->address_length,
@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
- if (!memory32->address_length)
+ if (!memory32->minimum && !memory32->address_length)
return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
- if (!fixed_memory32->address_length)
+ if (!fixed_memory32->address && !fixed_memory32->address_length)
return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
- if (!io->address_length)
+ if (!io->minimum && !io->address_length)
return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
- if (!fixed_io->address_length)
+ if (!fixed_io->address && !fixed_io->address_length)
return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
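
All five checks in this file now follow one rule: a descriptor is treated as absent only when both its start and its length are zero, so a descriptor with a nonzero start but a zero length is no longer discarded outright. Stated as a hypothetical helper (the patch open-codes it per resource type):

static bool acpi_res_present(u64 start, u64 len)
{
	/* absent only if the firmware zeroed both fields */
	return start || len;
}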
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index c11e3795431b..b3e3cc73ba79 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <asm/io.h>
+#include <trace/events/power.h>
#include "internal.h"
#include "sleep.h"
@@ -501,6 +502,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
ACPI_FLUSH_CPU_CACHE();
+ trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
@@ -516,6 +518,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
pr_info(PREFIX "Low-level resume complete\n");
break;
}
+ trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
/* This violates the spec but is required for bug compatibility. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 05550ba44d32..6d5a6cda0734 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -360,7 +360,8 @@ static int __init acpi_parse_apic_instance(char *str)
if (!str)
return -EINVAL;
- acpi_apic_instance = simple_strtoul(str, NULL, 0);
+ if (kstrtoint(str, 0, &acpi_apic_instance))
+ return -EINVAL;
pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 101fb090dcb9..071c1dfb93f3 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,7 +82,7 @@ module_param(allow_duplicates, bool, 0644);
* For Windows 8 systems: used to decide if video module
* should skip registering backlight interface of its own.
*/
-static int use_native_backlight_param = -1;
+static int use_native_backlight_param = 1;
module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
static bool use_native_backlight_dmi = false;
@@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void)
return use_native_backlight_dmi;
}
-static bool acpi_video_verify_backlight_support(void)
+bool acpi_video_verify_backlight_support(void)
{
if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
}
+EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -563,6 +564,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
},
{
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer TravelMate B113",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
+ },
+ },
+ {
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 4340s",
.matches = {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 33e3db548a29..c42feb2bacd0 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Dell Inspiron 5737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+ },
+ },
{ },
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 0033fafc470b..7671dbac6015 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -123,6 +123,15 @@ config AHCI_IMX
If unsure, say N.
+config AHCI_MVEBU
+ tristate "Marvell EBU AHCI SATA support"
+ depends on ARCH_MVEBU
+ help
+	  This option enables support for the Marvell EBU SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
config AHCI_SUNXI
tristate "Allwinner sunxi AHCI SATA support"
depends on ARCH_SUNXI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 44c8016e565c..5a02aeecef5b 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index b51605ac5974..0cd7c7a39e5b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -77,7 +77,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
#endif
@@ -118,13 +118,13 @@ static struct pci_driver acard_ahci_pci_driver = {
.id_table = acard_ahci_pci_tbl,
.probe = acard_ahci_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = acard_ahci_pci_device_suspend,
.resume = acard_ahci_pci_device_resume,
#endif
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 60707814a84b..dae5607e1115 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -445,10 +445,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
+ .driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+ .driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index af63c75c2001..5513296e5e2e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -237,6 +237,7 @@ enum {
error-handling stage) */
AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
+ AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
/* ap->flags bits */
@@ -370,7 +371,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
int pmp, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_fis_rx(struct ata_port *ap);
void ahci_start_engine(struct ata_port *ap);
int ahci_check_ready(struct ata_link *link);
int ahci_kick_engine(struct ata_port *ap);
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 2c83613ce2db..2b77d53bccf8 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -85,7 +85,8 @@ static int ahci_da850_probe(struct platform_device *pdev)
da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info, 0, 0);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
+ 0, 0, 0);
if (rc)
goto disable_resources;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 8befeb69eeb1..cac4360f272a 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -58,6 +58,8 @@ enum ahci_imx_type {
struct imx_ahci_priv {
struct platform_device *ahci_pdev;
enum ahci_imx_type type;
+ struct clk *sata_clk;
+ struct clk *sata_ref_clk;
struct clk *ahb_clk;
struct regmap *gpr;
bool no_device;
@@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
return ret;
}
- ret = ahci_platform_enable_clks(hpriv);
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
if (ret < 0)
goto disable_regulator;
@@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
}
- ahci_platform_disable_clks(hpriv);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
if (hpriv->target_pwr)
regulator_disable(hpriv->target_pwr);
@@ -324,6 +326,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
imx_sata_disable(hpriv);
imxpriv->no_device = true;
+
+ dev_info(ap->dev, "no device found, disabling link.\n");
+ dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
}
static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
@@ -385,6 +390,19 @@ static int imx_ahci_probe(struct platform_device *pdev)
imxpriv->no_device = false;
imxpriv->first_time = true;
imxpriv->type = (enum ahci_imx_type)of_id->data;
+
+ imxpriv->sata_clk = devm_clk_get(dev, "sata");
+ if (IS_ERR(imxpriv->sata_clk)) {
+ dev_err(dev, "can't get sata clock.\n");
+ return PTR_ERR(imxpriv->sata_clk);
+ }
+
+ imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+ if (IS_ERR(imxpriv->sata_ref_clk)) {
+ dev_err(dev, "can't get sata_ref clock.\n");
+ return PTR_ERR(imxpriv->sata_ref_clk);
+ }
+
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
@@ -407,10 +425,14 @@ static int imx_ahci_probe(struct platform_device *pdev)
hpriv->plat_data = imxpriv;
- ret = imx_sata_enable(hpriv);
+ ret = clk_prepare_enable(imxpriv->sata_clk);
if (ret)
return ret;
+ ret = imx_sata_enable(hpriv);
+ if (ret)
+ goto disable_clk;
+
/*
* Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
* and IP vendor specific register IMX_TIMER1MS.
@@ -432,18 +454,27 @@ static int imx_ahci_probe(struct platform_device *pdev)
reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
- ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
+ ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
+ 0, 0, 0);
if (ret)
- imx_sata_disable(hpriv);
+ goto disable_sata;
+ return 0;
+
+disable_sata:
+ imx_sata_disable(hpriv);
+disable_clk:
+ clk_disable_unprepare(imxpriv->sata_clk);
return ret;
}
static void ahci_imx_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
imx_sata_disable(hpriv);
+ clk_disable_unprepare(imxpriv->sata_clk);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
new file mode 100644
index 000000000000..fd3dfd733b84
--- /dev/null
+++ b/drivers/ata/ahci_mvebu.c
@@ -0,0 +1,128 @@
+/*
+ * AHCI glue platform driver for Marvell EBU SOCs
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "ahci.h"
+
+#define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0
+#define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4
+
+#define AHCI_WINDOW_CTRL(win) (0x60 + ((win) << 4))
+#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4))
+#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4))
+
+static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
+ const struct mbus_dram_target_info *dram)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ writel(0, hpriv->mmio + AHCI_WINDOW_CTRL(i));
+ writel(0, hpriv->mmio + AHCI_WINDOW_BASE(i));
+ writel(0, hpriv->mmio + AHCI_WINDOW_SIZE(i));
+ }
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ writel((cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ hpriv->mmio + AHCI_WINDOW_CTRL(i));
+ writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+ writel(((cs->size - 1) & 0xffff0000),
+ hpriv->mmio + AHCI_WINDOW_SIZE(i));
+ }
+}
+
+static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
+{
+ /*
+ * Enable the regret bit to allow the SATA unit to regret a
+	 * request that didn't receive an acknowledge and thus avoid a
+	 * deadlock.
+ */
+ writel(0x4, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
+ writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+}
+
+static const struct ata_port_info ahci_mvebu_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static int ahci_mvebu_probe(struct platform_device *pdev)
+{
+ struct ahci_host_priv *hpriv;
+ const struct mbus_dram_target_info *dram;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ dram = mv_mbus_dram_info();
+ if (!dram)
+ return -ENODEV;
+
+ ahci_mvebu_mbus_config(hpriv, dram);
+ ahci_mvebu_regret_option(hpriv);
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
+ 0, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static const struct of_device_id ahci_mvebu_of_match[] = {
+ { .compatible = "marvell,armada-380-ahci", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
+
+/*
+ * We don't provide power management operations yet, since there is
+ * currently no suspend/resume support at the platform level for
+ * Armada 38x.
+ */
+static struct platform_driver ahci_mvebu_driver = {
+ .probe = ahci_mvebu_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci-mvebu",
+ .owner = THIS_MODULE,
+ .of_match_table = ahci_mvebu_of_match,
+ },
+};
+module_platform_driver(ahci_mvebu_driver);
+
+MODULE_DESCRIPTION("Marvell EBU AHCI SATA driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>, Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci_mvebu");
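
For reference, the window register layout the new driver programs, as derived from the macros above (illustrative summary, not additional code in the patch):

/*
 * Window i occupies a 16-byte stride starting at offset 0x60:
 *   AHCI_WINDOW_CTRL(i) = 0x60 + i * 0x10   attr << 8 | target << 4 | 1
 *   AHCI_WINDOW_BASE(i) = 0x64 + i * 0x10   DRAM chip-select base
 *   AHCI_WINDOW_SIZE(i) = 0x68 + i * 0x10   (cs->size - 1) & 0xffff0000
 * All four windows are cleared first, then one is programmed for each
 * DRAM chip-select reported by mv_mbus_dram_info().
 */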
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ef67e79944f9..b10d81ddb528 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
@@ -33,6 +34,7 @@ static int ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
struct ahci_host_priv *hpriv;
+ unsigned long hflags = 0;
int rc;
hpriv = ahci_platform_get_resources(pdev);
@@ -55,7 +57,11 @@ static int ahci_probe(struct platform_device *pdev)
goto disable_resources;
}
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 0, 0);
+ if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ hflags, 0, 0);
if (rc)
goto pdata_exit;
@@ -76,6 +82,7 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,exynos5440-ahci", },
{ .compatible = "ibm,476gtr-ahci", },
{ .compatible = "snps,dwc-ahci", },
+ { .compatible = "hisilicon,hisi-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 633222226c19..2595598df9ce 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -166,7 +166,7 @@ static int st_ahci_probe(struct platform_device *pdev)
if (err)
return err;
- err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0);
+ err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0, 0);
if (err) {
ahci_platform_disable_resources(hpriv);
return err;
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 42d3f64e74b3..02002f125bd4 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -157,8 +157,6 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
}
static const struct ata_port_info ahci_sunxi_port_info = {
- AHCI_HFLAGS(AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -169,6 +167,7 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
+ unsigned long hflags;
int rc;
hpriv = ahci_platform_get_resources(pdev);
@@ -185,7 +184,11 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
if (rc)
goto disable_resources;
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info, 0, 0);
+ hflags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
+ hflags, 0, 0);
if (rc)
goto disable_resources;
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 77c89bf171f1..ee3a3659bd9e 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -78,6 +78,7 @@
struct xgene_ahci_context {
struct ahci_host_priv *hpriv;
struct device *dev;
+	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
void __iomem *csr_core; /* Core CSR address of IP */
void __iomem *csr_diag; /* Diag CSR address of IP */
void __iomem *csr_axi; /* AXI CSR address of IP */
@@ -98,20 +99,62 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
}
/**
+ * xgene_ahci_restart_engine - Restart the dma engine.
+ * @ap : ATA port of interest
+ *
+ * Restarts the dma engine inside the controller.
+ */
+static int xgene_ahci_restart_engine(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ ahci_stop_engine(ap);
+ ahci_start_fis_rx(ap);
+ hpriv->start_engine(ap);
+
+ return 0;
+}
+
+/**
+ * xgene_ahci_qc_issue - Issue commands to the device
+ * @qc: Command to issue
+ *
+ * Due to a hardware errata affecting the IDENTIFY DEVICE command, the
+ * controller cannot clear the BSY bit after receiving the PIO setup FIS.
+ * This sends the DMA state machine into the CMFatalErrorUpdate state and
+ * locks it up; restarting the DMA engine brings the controller back out.
+ */
+static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ int rc = 0;
+
+ if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+ xgene_ahci_restart_engine(ap);
+
+ rc = ahci_qc_issue(qc);
+
+ /* Save the last command issued */
+ ctx->last_cmd[ap->port_no] = qc->tf.command;
+
+ return rc;
+}
+
+/**
* xgene_ahci_read_id - Read ID data from the specified device
* @dev: device
* @tf: proposed taskfile
* @id: data buffer
*
* This custom read ID function is required due to the fact that the HW
- * does not support DEVSLP and the controller state machine may get stuck
- * after processing the ID query command.
+ * does not support DEVSLP.
*/
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id)
{
u32 err_mask;
- void __iomem *port_mmio = ahci_port_base(dev->link->ap);
err_mask = ata_do_dev_read_id(dev, tf, id);
if (err_mask)
@@ -133,16 +176,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
*/
id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
- /*
- * Due to HW errata, restart the port if no other command active.
- * Otherwise the controller may get stuck.
- */
- if (!readl(port_mmio + PORT_CMD_ISSUE)) {
- writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* Force a barrier */
- writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* Force a barrier */
- }
return 0;
}
@@ -300,10 +333,10 @@ static struct ata_port_operations xgene_ahci_ops = {
.host_stop = xgene_ahci_host_stop,
.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
+ .qc_issue = xgene_ahci_qc_issue,
};
static const struct ata_port_info xgene_ahci_port_info = {
- AHCI_HFLAGS(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -382,6 +415,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
struct xgene_ahci_context *ctx;
struct resource *res;
+ unsigned long hflags;
int rc;
hpriv = ahci_platform_get_resources(pdev);
@@ -450,7 +484,10 @@ static int xgene_ahci_probe(struct platform_device *pdev)
goto disable_resources;
}
- rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, 0, 0);
+ hflags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+
+ rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info,
+ hflags, 0, 0);
if (rc)
goto disable_resources;
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 9498a7d3846f..9ff545ce8da3 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -241,7 +241,7 @@ static struct pci_driver ata_generic_pci_driver = {
.id_table = ata_generic,
.probe = ata_generic_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6334c8d7c3f1..893e30e9a9ef 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -830,7 +830,7 @@ static bool piix_irq_check(struct ata_port *ap)
return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int piix_broken_suspend(void)
{
static const struct dmi_system_id sysids[] = {
@@ -1767,7 +1767,7 @@ static struct pci_driver piix_pci_driver = {
.id_table = piix_pci_tbl,
.probe = piix_init_one,
.remove = piix_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = piix_pci_device_suspend,
.resume = piix_pci_device_resume,
#endif
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index b9861453fc81..d72ce0470309 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
@@ -464,6 +463,11 @@ void ahci_save_initial_config(struct device *dev,
cap |= HOST_CAP_FBS;
}
+ if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
+ dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
+ cap &= ~HOST_CAP_FBS;
+ }
+
if (force_port_map && port_map != force_port_map) {
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, force_port_map);
@@ -615,7 +619,7 @@ int ahci_stop_engine(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
-static void ahci_start_fis_rx(struct ata_port *ap)
+void ahci_start_fis_rx(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -641,6 +645,7 @@ static void ahci_start_fis_rx(struct ata_port *ap)
/* flush */
readl(port_mmio + PORT_CMD);
}
+EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
static int ahci_stop_fis_rx(struct ata_port *ap)
{
@@ -1940,7 +1945,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
}
EXPORT_SYMBOL_GPL(ahci_interrupt);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = ahci_port_base(ap);
@@ -1969,6 +1974,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
return 0;
}
+EXPORT_SYMBOL_GPL(ahci_qc_issue);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 7cb3a85719c0..b0077589f065 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -250,8 +250,13 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
if (IS_ERR(hpriv->phy)) {
rc = PTR_ERR(hpriv->phy);
switch (rc) {
- case -ENODEV:
case -ENOSYS:
+ /* No PHY support. Check if PHY is required. */
+ if (of_find_property(dev->of_node, "phys", NULL)) {
+ dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
+ goto err_out;
+ }
+ case -ENODEV:
/* continue normally */
hpriv->phy = NULL;
break;
@@ -283,6 +288,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
* @pdev: platform device pointer for the host
* @hpriv: ahci-host private data for the host
* @pi_template: template for the ata_port_info to use
+ * @host_flags: ahci host flags used in ahci_host_priv
* @force_port_map: param passed to ahci_save_initial_config
* @mask_port_map: param passed to ahci_save_initial_config
*
@@ -296,6 +302,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
+ unsigned long host_flags,
unsigned int force_port_map,
unsigned int mask_port_map)
{
@@ -312,7 +319,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
}
/* prepare host */
- hpriv->flags |= (unsigned long)pi.private_data;
+ pi.private_data = (void *)host_flags;
+ hpriv->flags |= host_flags;
ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
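
With the extra parameter, every caller now passes its host flags directly, as the updated call sites elsewhere in this diff show. A minimal hypothetical probe fragment (the port-info name is made up):

	unsigned long hflags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;

	rc = ahci_platform_init_host(pdev, hpriv, &example_port_info,
				     hflags, 0 /* force_port_map */,
				     0 /* mask_port_map */);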
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ef8567de6a75..72691fd93948 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1993,7 +1993,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+
+ /* From SAT, use last 2 words from fw rev unless they are spaces */
+ ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+ if (strncmp(&rbuf[32], " ", 4) == 0)
+ ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
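
A worked example of the SAT fallback above (strings are illustrative): the ATA firmware revision field is eight characters and the SCSI INQUIRY revision is four, so for a firmware revision of "12345678" the last four characters "5678" are reported, while for "1.23    " the last four are blanks and the code falls back to the first four, "1.23".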
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b603720b877d..1121153f1ecd 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2433,15 +2433,6 @@ int ata_pci_sff_activate_host(struct ata_host *host,
mask = (1 << 2) | (1 << 0);
if ((tmp8 & mask) != mask)
legacy_mode = 1;
-#if defined(CONFIG_NO_ATA_LEGACY)
- /* Some platforms with PCI limits cannot address compat
- port space. In that case we punt if their firmware has
- left a device in compatibility mode */
- if (legacy_mode) {
- printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
- return -EOPNOTSUPP;
- }
-#endif
}
if (!devres_open_group(dev, NULL, GFP_KERNEL))
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 5108b8744dce..b70fce2a38eb 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -265,7 +265,7 @@ static struct pci_driver pacpi_pci_driver = {
.id_table = pacpi_pci_tbl,
.probe = pacpi_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 1b7b2ccabcff..d19cd88ed2d3 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -589,7 +589,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ali_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -616,7 +616,7 @@ static struct pci_driver ali_pci_driver = {
.id_table = ali,
.probe = ali_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ali_reinit_one,
#endif
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 1206fa6b62ca..8d4d959a821c 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int amd_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -625,7 +625,7 @@ static struct pci_driver amd_pci_driver = {
.id_table = amd,
.probe = amd_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = amd_reinit_one,
#endif
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 3aa4e655e3c6..96c05c9494fa 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -422,7 +422,7 @@ static const struct pci_device_id artop_pci_tbl[] = {
{ } /* terminate list */
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atp8xx_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -444,7 +444,7 @@ static struct pci_driver artop_pci_driver = {
.id_table = artop_pci_tbl,
.probe = artop_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = atp8xx_reinit_one,
#endif
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 30fa4ca4cef6..970f7767e5fd 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -298,7 +298,7 @@ static struct pci_driver atiixp_pci_driver = {
.id_table = atiixp,
.probe = atiixp_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.resume = ata_pci_device_resume,
.suspend = ata_pci_device_suspend,
#endif
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 7e73a0f1e323..a705cfca90f7 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -530,7 +530,7 @@ err_out:
return rc;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atp867x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -558,7 +558,7 @@ static struct pci_driver atp867x_driver = {
.id_table = atp867x_pci_tbl,
.probe = atp867x_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = atp867x_reinit_one,
#endif
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index ba0d8a29dc23..03f2f2bc83bd 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1619,7 +1619,7 @@ static int bfin_atapi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(pdev);
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 57f1be64dbf2..c47caa807fa9 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -231,7 +231,7 @@ static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int cmd640_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -256,7 +256,7 @@ static struct pci_driver cmd640_pci_driver = {
.id_table = cmd640,
.probe = cmd640_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = cmd640_reinit_one,
#endif
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 6bca3505b9e9..13ca5883285b 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -487,7 +487,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int cmd64x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -518,7 +518,7 @@ static struct pci_driver cmd64x_pci_driver = {
.id_table = cmd64x,
.probe = cmd64x_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = cmd64x_reinit_one,
#endif
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index bcde4b786807..d65cb9d2fa8c 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -229,7 +229,7 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_host_register(host, &cs5520_sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/**
* cs5520_reinit_one - device resume
* @pdev: PCI device
@@ -278,7 +278,7 @@ static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
pci_save_state(pdev);
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
/* For now keep DMA off. We can set it for all but A rev CS5510 once the
core ATA code can handle it */
@@ -295,7 +295,7 @@ static struct pci_driver cs5520_pci_driver = {
.id_table = pata_cs5520,
.probe = cs5520_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = cs5520_pci_device_suspend,
.resume = cs5520_reinit_one,
#endif
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 8afe854a5a50..48ae4b434474 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -326,7 +326,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int cs5530_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -343,7 +343,7 @@ static int cs5530_reinit_one(struct pci_dev *pdev)
ata_host_resume(host);
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static const struct pci_device_id cs5530[] = {
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
@@ -356,7 +356,7 @@ static struct pci_driver cs5530_pci_driver = {
.id_table = cs5530,
.probe = cs5530_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = cs5530_reinit_one,
#endif
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 2c0986fa4bb2..97584e8456d9 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -200,7 +200,7 @@ static struct pci_driver cs5535_pci_driver = {
.id_table = cs5535,
.probe = cs5535_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 32ddcae5a360..6c15a554efbe 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -297,7 +297,7 @@ static struct pci_driver cs5536_pci_driver = {
.id_table = cs5536,
.probe = cs5536_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 3435bd6a5cc9..793018460d82 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -151,7 +151,7 @@ static struct pci_driver cy82c693_pci_driver = {
.id_table = cy82c693,
.probe = cy82c693_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index f440892225f4..4a57a6f032d9 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -288,7 +288,7 @@ static struct pci_driver efar_pci_driver = {
.id_table = efar_pci_tbl,
.probe = efar_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index cad9d45749c4..6ad5c072ce34 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -708,8 +708,8 @@ static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE
? drv_data->dma_tx_channel : drv_data->dma_rx_channel;
- txd = channel->device->device_prep_slave_sg(channel, qc->sg,
- qc->n_elem, qc->dma_dir, DMA_CTRL_ACK, NULL);
+ txd = dmaengine_prep_slave_sg(channel, qc->sg, qc->n_elem, qc->dma_dir,
+ DMA_CTRL_ACK);
if (!txd) {
dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n");
return;
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 8e76f79689d3..cbc3de793d1d 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -386,7 +386,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int hpt36x_reinit_one(struct pci_dev *dev)
{
struct ata_host *host = pci_get_drvdata(dev);
@@ -411,7 +411,7 @@ static struct pci_driver hpt36x_pci_driver = {
.id_table = hpt36x,
.probe = hpt36x_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = hpt36x_reinit_one,
#endif
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 255c5aaff3a8..d019cdd5bc9f 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -249,7 +249,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
IRQF_SHARED, &hpt3x3_sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int hpt3x3_reinit_one(struct pci_dev *dev)
{
struct ata_host *host = pci_get_drvdata(dev);
@@ -277,7 +277,7 @@ static struct pci_driver hpt3x3_pci_driver = {
.id_table = hpt3x3,
.probe = hpt3x3_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = hpt3x3_reinit_one,
#endif
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index e0872db913d6..af424573c2ff 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -185,7 +185,7 @@ static int pata_imx_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pata_imx_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -244,7 +244,7 @@ static struct platform_driver pata_imx_driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &pata_imx_pm_ops,
#endif
},
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 81369d187a5c..4f97d1e52f85 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -283,7 +283,7 @@ static struct pci_driver it8213_pci_driver = {
.id_table = it8213_pci_tbl,
.probe = it8213_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index dc3d7877f29d..a5088ecb349f 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -935,7 +935,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int it821x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -965,7 +965,7 @@ static struct pci_driver it821x_pci_driver = {
.id_table = it821x,
.probe = it821x_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = it821x_reinit_one,
#endif
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index b1cfa0258fd3..4d1a5d2c4287 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -157,7 +157,7 @@ static struct pci_driver jmicron_pci_driver = {
.id_table = jmicron_pci_tbl,
.probe = jmicron_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index c28d0645e851..a02f76fdcfcd 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -845,8 +845,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
{
int rc;
@@ -907,8 +906,7 @@ static int pata_macio_do_resume(struct pata_macio_priv *priv)
return 0;
}
-
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static struct scsi_host_template pata_macio_sht = {
ATA_BASE_SHT(DRV_NAME),
@@ -1208,8 +1206,7 @@ static int pata_macio_detach(struct macio_dev *mdev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
struct ata_host *host = macio_get_drvdata(mdev);
@@ -1223,8 +1220,7 @@ static int pata_macio_resume(struct macio_dev *mdev)
return pata_macio_do_resume(host->private_data);
}
-
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PMAC_MEDIABAY
static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
@@ -1316,8 +1312,7 @@ static void pata_macio_pci_detach(struct pci_dev *pdev)
ata_host_detach(host);
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -1331,8 +1326,7 @@ static int pata_macio_pci_resume(struct pci_dev *pdev)
return pata_macio_do_resume(host->private_data);
}
-
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static struct of_device_id pata_macio_match[] =
{
@@ -1360,7 +1354,7 @@ static struct macio_driver pata_macio_driver =
},
.probe = pata_macio_attach,
.remove = pata_macio_detach,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = pata_macio_suspend,
.resume = pata_macio_resume,
#endif
@@ -1383,7 +1377,7 @@ static struct pci_driver pata_macio_pci_driver = {
.id_table = pata_macio_pci_match,
.probe = pata_macio_pci_attach,
.remove = pata_macio_pci_detach,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = pata_macio_pci_suspend,
.resume = pata_macio_pci_resume,
#endif
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 6bad3df3a13c..ae9feb1ba8f7 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -171,7 +171,7 @@ static struct pci_driver marvell_pci_driver = {
.id_table = marvell_pci_tbl,
.probe = marvell_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 0024ced3e200..ccd1c83a05cc 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -819,9 +819,7 @@ mpc52xx_ata_remove(struct platform_device *op)
return 0;
}
-
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int
mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
{
@@ -847,10 +845,8 @@ mpc52xx_ata_resume(struct platform_device *op)
return 0;
}
-
#endif
-
static struct of_device_id mpc52xx_ata_of_match[] = {
{ .compatible = "fsl,mpc5200-ata", },
{ .compatible = "mpc5200-ata", },
@@ -861,7 +857,7 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
.remove = mpc52xx_ata_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
#endif
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index f39a5379e816..202b4d601393 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -223,7 +223,7 @@ static struct pci_driver mpiix_pci_driver = {
.id_table = mpiix,
.probe = mpiix_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index e3b97093ef9a..0ea18331d466 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -92,7 +92,7 @@ static struct pci_driver netcell_pci_driver = {
.id_table = netcell_pci_tbl,
.probe = netcell_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 56201a69af12..efb272da8567 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -152,8 +152,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
IRQF_SHARED, &ninja32_sht);
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int ninja32_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -183,7 +182,7 @@ static struct pci_driver ninja32_pci_driver = {
.id_table = ninja32,
.probe = ninja32_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ninja32_reinit_one,
#endif
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 6154c3ee11a5..200e1eb23a20 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -161,7 +161,7 @@ static struct pci_driver ns87410_pci_driver = {
.id_table = ns87410,
.probe = ns87410_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index d44df7ccfe43..84c6b225b56e 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -385,7 +385,7 @@ static const struct pci_device_id ns87415_pci_tbl[] = {
{ } /* terminate list */
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ns87415_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -407,7 +407,7 @@ static struct pci_driver ns87415_pci_driver = {
.id_table = ns87415_pci_tbl,
.probe = ns87415_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ns87415_reinit_one,
#endif
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 83c4ddb1bc7f..2a97d3a531ec 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -865,7 +865,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (node == NULL)
return -EINVAL;
- cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
+ cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL);
if (!cf_port)
return -ENOMEM;
@@ -881,10 +881,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
n_size = of_n_size_cells(node);
reg_prop = of_find_property(node, "reg", &reg_len);
- if (!reg_prop || reg_len < sizeof(__be32)) {
- rv = -EINVAL;
- goto free_cf_port;
- }
+ if (!reg_prop || reg_len < sizeof(__be32))
+ return -EINVAL;
+
cs_num = reg_prop->value;
cf_port->cs0 = be32_to_cpup(cs_num);
@@ -901,16 +900,13 @@ static int octeon_cf_probe(struct platform_device *pdev)
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
of_node_put(dma_node);
- rv = -EINVAL;
- goto free_cf_port;
+ return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
resource_size(res_dma));
-
if (!cf_port->dma_base) {
of_node_put(dma_node);
- rv = -EINVAL;
- goto free_cf_port;
+ return -EINVAL;
}
irq_handler = octeon_cf_interrupt;
@@ -921,41 +917,34 @@ static int octeon_cf_probe(struct platform_device *pdev)
of_node_put(dma_node);
}
res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_cs1) {
- rv = -EINVAL;
- goto free_cf_port;
- }
+ if (!res_cs1)
+ return -EINVAL;
+
cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
resource_size(res_cs1));
-
if (!cs1)
- goto free_cf_port;
+ return rv;
+
+ if (reg_len < (n_addr + n_size + 1) * sizeof(__be32))
+ return -EINVAL;
- if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) {
- rv = -EINVAL;
- goto free_cf_port;
- }
cs_num += n_addr + n_size;
cf_port->cs1 = be32_to_cpup(cs_num);
}
res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!res_cs0) {
- rv = -EINVAL;
- goto free_cf_port;
- }
+ if (!res_cs0)
+ return -EINVAL;
cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
resource_size(res_cs0));
-
if (!cs0)
- goto free_cf_port;
+ return rv;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
- goto free_cf_port;
+ return rv;
ap = host->ports[0];
ap->private_data = cf_port;
@@ -1020,17 +1009,12 @@ static int octeon_cf_probe(struct platform_device *pdev)
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
-
dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
is_16bit ? 16 : 8,
cf_port->is_true_ide ? ", True IDE" : "");
return ata_host_activate(host, irq, irq_handler,
IRQF_SHARED, &octeon_cf_sht);
-
-free_cf_port:
- kfree(cf_port);
- return rv;
}
static void octeon_cf_shutdown(struct device *dev)
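
Besides the guard change, the pata_octeon_cf hunk above switches cf_port to devm_kzalloc(), tying the allocation's lifetime to the device so every failure path becomes a bare return and the free_cf_port label with its kfree() can go away. A sketch of the idiom under made-up names:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_port {
	int cs0;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_port *port;

	/* Freed automatically when the device is unbound or probe fails. */
	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	if (!platform_get_resource(pdev, IORESOURCE_MEM, 0))
		return -EINVAL;		/* no kfree() needed on this path */

	platform_set_drvdata(pdev, port);
	return 0;
}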
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 319b64491b7b..b9bf78b7d48d 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -258,7 +258,7 @@ static struct pci_driver oldpiix_pci_driver = {
.id_table = oldpiix_pci_tbl,
.probe = oldpiix_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index fb042e0519d0..3a944a029264 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -184,7 +184,7 @@ static struct pci_driver opti_pci_driver = {
.id_table = opti,
.probe = opti_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index bb71ea214b99..bdec7efa4643 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -440,7 +440,7 @@ static struct pci_driver optidma_pci_driver = {
.id_table = optidma,
.probe = optidma_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 1151f23177bb..4d06a5cda987 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -62,7 +62,7 @@ enum {
};
static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pdc2027x_reinit_one(struct pci_dev *pdev);
#endif
static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
@@ -128,7 +128,7 @@ static struct pci_driver pdc2027x_pci_driver = {
.id_table = pdc2027x_pci_tbl,
.probe = pdc2027x_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = pdc2027x_reinit_one,
#endif
@@ -761,7 +761,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
IRQF_SHARED, &pdc2027x_sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pdc2027x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index defa050e1784..9001991d2830 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -377,7 +377,7 @@ static struct pci_driver pdc202xx_pci_driver = {
.id_table = pdc202xx,
.probe = pdc202xx_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 0b46be117051..35cb0e263237 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -110,7 +110,7 @@ static struct pci_driver ata_tosh_pci_driver = {
.id_table = ata_tosh,
.probe = ata_tosh_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index be3f10240dca..a3f1123d17aa 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -237,7 +237,7 @@ static struct pci_driver radisys_pci_driver = {
.id_table = radisys_pci_tbl,
.probe = radisys_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 521b2137ea3e..9ce5952216bc 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -382,7 +382,7 @@ static struct pci_driver rdc_pci_driver = {
.id_table = rdc_pci_tbl,
.probe = rdc_init_one,
.remove = rdc_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index caedc90855b2..b3ec18c6bcc6 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -101,7 +101,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
return -ENODEV;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int rz1000_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -133,7 +133,7 @@ static struct pci_driver rz1000_pci_driver = {
.id_table = pata_rz1000,
.probe = rz1000_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = rz1000_reinit_one,
#endif
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 0610e78c8a2a..fb528831fb92 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -619,7 +619,7 @@ static int __exit pata_s3c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pata_s3c_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -670,7 +670,7 @@ static struct platform_driver pata_s3c_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &pata_s3c_pm_ops,
#endif
},
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 96a232fffae6..c71de5d680d1 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -254,7 +254,7 @@ static struct pci_driver sc1200_pci_driver = {
.id_table = sc1200,
.probe = sc1200_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index f1f5b5ae3382..4e006d74bef8 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -1096,7 +1096,7 @@ static struct pci_driver scc_pci_driver = {
.id_table = scc_pci_tbl,
.probe = scc_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 5a1cde0ea360..b920c3407f8b 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -64,7 +64,7 @@ static struct pci_driver sch_pci_driver = {
.id_table = sch_pci_tbl,
.probe = sch_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index e27f31fe1b67..fc5f31d4828e 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -436,7 +436,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int serverworks_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -468,7 +468,7 @@ static struct pci_driver serverworks_pci_driver = {
.id_table = serverworks,
.probe = serverworks_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = serverworks_reinit_one,
#endif
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 73fe362d9716..f597edccedec 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -403,7 +403,7 @@ use_ioports:
return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sil680_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -429,7 +429,7 @@ static struct pci_driver sil680_pci_driver = {
.id_table = sil680,
.probe = sil680_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sil680_reinit_one,
#endif
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 78d913aa93c8..626f989d5c6a 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -869,7 +869,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sis_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -899,7 +899,7 @@ static struct pci_driver sis_pci_driver = {
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sis_reinit_one,
#endif
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 900f0e4a1faf..4935f61f629c 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -337,7 +337,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sl82c105_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -365,7 +365,7 @@ static struct pci_driver sl82c105_pci_driver = {
.id_table = sl82c105,
.probe = sl82c105_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sl82c105_reinit_one,
#endif
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 7bc78e264f9e..d9364af8eb63 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -207,7 +207,7 @@ static const struct pci_device_id triflex[] = {
{ },
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -233,7 +233,7 @@ static struct pci_driver triflex_pci_driver = {
.id_table = triflex,
.probe = triflex_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = triflex_ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index f6c9632bdff6..1ca6bcab369f 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -659,7 +659,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/**
* via_reinit_one - reinit after resume
 * @pdev: PCI device
@@ -704,7 +704,7 @@ static struct pci_driver via_pci_driver = {
.id_table = via,
.probe = via_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = via_reinit_one,
#endif
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index fb0b40a191c2..616a6d2ac20c 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -774,20 +774,6 @@ static int sata_fsl_port_start(struct ata_port *ap)
VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
VPRINTK("CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
-#ifdef CONFIG_MPC8315_DS
- /*
- * Workaround for 8315DS board 3gbps link-up issue,
- * currently limit SATA port to GEN1 speed
- */
- sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
- temp &= ~(0xF << 4);
- temp |= (0x1 << 4);
- sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
-
- sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
- dev_warn(dev, "scr_control, speed limited to %x\n", temp);
-#endif
-
return 0;
}
@@ -1588,7 +1574,7 @@ static int sata_fsl_remove(struct platform_device *ofdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
@@ -1644,7 +1630,7 @@ static struct platform_driver fsl_sata_driver = {
},
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
#endif
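
The deleted CONFIG_MPC8315_DS block above was a board workaround that pinned the port to Gen1 by rewriting the SPD field (bits 7:4) of the SControl register at port start. For reference, the general shape of such a speed cap, sketched with the generic libata SCR accessors rather than the driver's private ones:

#include <linux/libata.h>

/* Sketch: cap a SATA link at Gen1 (1.5 Gbps) via SControl's SPD field. */
static void example_limit_to_gen1(struct ata_link *link)
{
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return;
	scontrol &= ~(0xf << 4);	/* clear SPD (bits 7:4) */
	scontrol |= (0x1 << 4);		/* 0x1 = limit to Gen1 */
	sata_scr_write(link, SCR_CONTROL, scontrol);
}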
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 5c54d957370a..069827826b20 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -785,7 +785,7 @@ static int init_controller(void __iomem *mmio_base, u16 hctl)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int inic_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -898,7 +898,7 @@ static const struct pci_device_id inic_pci_tbl[] = {
static struct pci_driver inic_pci_driver = {
.name = DRV_NAME,
.id_table = inic_pci_tbl,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = inic_pci_device_resume,
#endif
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 05c8a44adf8e..391cfda1e83f 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4222,7 +4222,7 @@ static int mv_platform_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(pdev);
@@ -4289,7 +4289,7 @@ static struct platform_driver mv_platform_driver = {
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
@@ -4299,7 +4299,7 @@ static struct pci_driver mv_pci_driver = {
.id_table = mv_pci_tbl,
.probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = mv_pci_device_resume,
#endif
@@ -4457,7 +4457,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index ba5f27120332..cdf99fac139a 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -295,7 +295,7 @@ struct nv_swncq_port_priv {
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
@@ -379,7 +379,7 @@ static struct pci_driver nv_pci_driver = {
.name = DRV_NAME,
.id_table = nv_pci_tbl,
.probe = nv_init_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = nv_pci_device_resume,
#endif
@@ -2431,7 +2431,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 2b25bd83fc9d..61eb6d77dac7 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -937,7 +937,7 @@ static int sata_rcar_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sata_rcar_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -991,7 +991,7 @@ static struct platform_driver sata_rcar_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = sata_rcar_match,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &sata_rcar_pm_ops,
#endif
},
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3062f8605b29..40b76b2d18c6 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -112,7 +112,7 @@ enum {
};
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
@@ -166,7 +166,7 @@ static struct pci_driver sil_pci_driver = {
.id_table = sil_pci_tbl,
.probe = sil_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sil_pci_device_resume,
#endif
@@ -802,7 +802,7 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&sil_sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index aa1051ba6d13..0534890f118a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -353,8 +353,10 @@ static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev);
+#endif
+#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap);
#endif
@@ -375,7 +377,7 @@ static struct pci_driver sil24_pci_driver = {
.id_table = sil24_pci_tbl,
.probe = sil24_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sil24_pci_device_resume,
#endif
@@ -1350,7 +1352,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&sil24_sht);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
@@ -1370,7 +1372,9 @@ static int sil24_pci_device_resume(struct pci_dev *pdev)
return 0;
}
+#endif
+#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap)
{
sil24_config_pmp(ap, ap->nr_pmp_links);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index b513428171b3..d1637ac40a73 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -82,7 +82,7 @@ static struct pci_driver sis_pci_driver = {
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index f72e84228c5c..47bf89464cef 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -103,7 +103,7 @@ static struct pci_driver svia_pci_driver = {
.name = DRV_NAME,
.id_table = svia_pci_tbl,
.probe = svia_init_one,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 204814e88e46..d4725fc0395d 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2780,7 +2780,7 @@ static struct pci_driver fore200e_pca_driver = {
static int __init fore200e_module_init(void)
{
- int err;
+ int err = 0;
printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1bdf104e90bb..b621f56a36be 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2551,12 +2551,12 @@ done:
timeout = 5 * 1000;
while (atomic_read(&vc->scq->used) > 0) {
timeout = msleep_interruptible(timeout);
- if (!timeout)
+ if (!timeout) {
+ pr_warn("%s: SCQ drain timeout: %u used\n",
+ card->name, atomic_read(&vc->scq->used));
break;
+ }
}
- if (!timeout)
- printk("%s: SCQ drain timeout: %u used\n",
- card->name, atomic_read(&vc->scq->used));
writel(TCMDQ_HALT | vc->index, SAR_REG_TCMDQ);
clear_scd(card, vc->scq, vc->class);
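
The idt77252 change folds the timeout warning into the drain loop itself, so the queue depth is sampled at the moment the wait gives up, and upgrades the bare printk to pr_warn(). The underlying idiom, sketched generically:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/printk.h>

/* Sketch: bounded, interruptible wait for a counter to drain. */
static void example_drain(atomic_t *used, const char *name)
{
	unsigned long timeout = 5 * 1000;	/* milliseconds */

	while (atomic_read(used) > 0) {
		timeout = msleep_interruptible(timeout);
		if (!timeout) {
			pr_warn("%s: drain timeout: %u used\n",
				name, atomic_read(used));
			break;
		}
	}
}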
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 83969f8c5727..6467c919c509 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -176,14 +176,24 @@ static int __init cma_activate_area(struct cma *cma)
base_pfn = pfn;
for (j = pageblock_nr_pages; j; --j, pfn++) {
WARN_ON_ONCE(!pfn_valid(pfn));
+ /*
+ * alloc_contig_range requires the pfn range
+ * specified to be in the same zone. Make this
+ * simple by forcing the entire CMA resv range
+ * to be in the same zone.
+ */
if (page_zone(pfn_to_page(pfn)) != zone)
- return -EINVAL;
+ goto err;
}
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
} while (--i);
mutex_init(&cma->lock);
return 0;
+
+err:
+ kfree(cma->bitmap);
+ return -EINVAL;
}
static struct cma cma_areas[MAX_CMA_AREAS];
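
The dma-contiguous fix matters because cma_activate_area() has already allocated cma->bitmap by the time the zone check runs; returning -EINVAL directly leaked it. Routing the failure through an error label is the standard unwind shape, sketched here with hypothetical names:

#include <linux/slab.h>

struct example_area {
	unsigned long *bitmap;
};

/* Sketch: unwind partially built state through a single error label. */
static int example_activate(struct example_area *area, size_t bitmap_bytes,
			    bool range_ok)
{
	area->bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
	if (!area->bitmap)
		return -ENOMEM;

	if (!range_ok)			/* e.g. a pageblock outside the zone */
		goto err;

	return 0;
err:
	kfree(area->bitmap);		/* must not leak the bitmap */
	return -EINVAL;
}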
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 343ffad59377..bf412961a934 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -214,9 +214,6 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
error, (unsigned long long)nsecs >> 10);
}
-
- trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
- error);
}
/**
@@ -387,7 +384,9 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
calltime = initcall_debug_start(dev);
pm_dev_dbg(dev, state, info);
+ trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
+ trace_device_pm_callback_end(dev, error);
suspend_report_result(cb, error);
initcall_debug_report(dev, calltime, error, state, info);
@@ -545,6 +544,7 @@ static void dpm_resume_noirq(pm_message_t state)
struct device *dev;
ktime_t starttime = ktime_get();
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
@@ -587,6 +587,7 @@ static void dpm_resume_noirq(pm_message_t state)
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
cpuidle_resume();
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
@@ -664,6 +665,7 @@ static void dpm_resume_early(pm_message_t state)
struct device *dev;
ktime_t starttime = ktime_get();
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
@@ -703,6 +705,7 @@ static void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, "early");
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
@@ -834,6 +837,7 @@ void dpm_resume(pm_message_t state)
struct device *dev;
ktime_t starttime = ktime_get();
+ trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
mutex_lock(&dpm_list_mtx);
@@ -875,6 +879,7 @@ void dpm_resume(pm_message_t state)
dpm_show_time(starttime, state, NULL);
cpufreq_resume();
+ trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
@@ -913,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state)
if (callback) {
pm_dev_dbg(dev, state, info);
+ trace_device_pm_callback_start(dev, info, state.event);
callback(dev);
+ trace_device_pm_callback_end(dev, 0);
}
device_unlock(dev);
@@ -932,6 +939,7 @@ void dpm_complete(pm_message_t state)
{
struct list_head list;
+ trace_suspend_resume(TPS("dpm_complete"), state.event, true);
might_sleep();
INIT_LIST_HEAD(&list);
@@ -951,6 +959,7 @@ void dpm_complete(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
@@ -1086,6 +1095,7 @@ static int dpm_suspend_noirq(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
@@ -1126,6 +1136,7 @@ static int dpm_suspend_noirq(pm_message_t state)
} else {
dpm_show_time(starttime, state, "noirq");
}
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
@@ -1222,6 +1233,7 @@ static int dpm_suspend_late(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@@ -1257,6 +1269,7 @@ static int dpm_suspend_late(pm_message_t state)
} else {
dpm_show_time(starttime, state, "late");
}
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
@@ -1295,7 +1308,9 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
calltime = initcall_debug_start(dev);
+ trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
+ trace_device_pm_callback_end(dev, error);
suspend_report_result(cb, error);
initcall_debug_report(dev, calltime, error, state, info);
@@ -1461,6 +1476,7 @@ int dpm_suspend(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
cpufreq_suspend();
@@ -1498,6 +1514,7 @@ int dpm_suspend(pm_message_t state)
dpm_save_failed_step(SUSPEND_SUSPEND);
} else
dpm_show_time(starttime, state, NULL);
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
@@ -1549,8 +1566,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
callback = dev->driver->pm->prepare;
}
- if (callback)
+ if (callback) {
+ trace_device_pm_callback_start(dev, info, state.event);
ret = callback(dev);
+ trace_device_pm_callback_end(dev, ret);
+ }
device_unlock(dev);
@@ -1582,6 +1602,7 @@ int dpm_prepare(pm_message_t state)
{
int error = 0;
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
might_sleep();
mutex_lock(&dpm_list_mtx);
@@ -1612,6 +1633,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
return error;
}
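
Taken together, the power/main.c hunks drop the old device_pm_report_time event and instead bracket every suspend/resume phase and every device callback with paired start/end tracepoints, which is what lets tracing tools reconstruct a per-device timeline of a sleep cycle. The callback half of the pattern, sketched (pm_callback_t and the trace events are the real kernel ones; the wrapper name is illustrative):

#include <linux/pm.h>
#include <trace/events/power.h>

/* Sketch: bracket one PM callback with start/end events. */
static int example_run_callback(pm_callback_t cb, struct device *dev,
				pm_message_t state, const char *info)
{
	int error;

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	return error;
}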
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index e8d11b6630ee..dbb8350ea8dc 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <trace/events/power.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -49,6 +50,7 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ trace_suspend_resume(TPS("syscore_suspend"), 0, true);
pr_debug("Checking wakeup interrupts\n");
/* Return error code if there are any wakeup interrupts pending. */
@@ -70,6 +72,7 @@ int syscore_suspend(void)
"Interrupts enabled after %pF\n", ops->suspend);
}
+ trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
@@ -92,6 +95,7 @@ void syscore_resume(void)
{
struct syscore_ops *ops;
+ trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
@@ -103,6 +107,7 @@ void syscore_resume(void)
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled after %pF\n", ops->resume);
}
+ trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
EXPORT_SYMBOL_GPL(syscore_resume);
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index b6c8aaf4931b..5b17ec88ea05 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1337,8 +1337,11 @@ int drbd_submit_peer_request(struct drbd_device *device,
return 0;
}
+ /* Discards don't have any payload.
+ * But the scsi layer still expects a bio_vec it can use internally,
+ * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
if (peer_req->flags & EE_IS_TRIM)
- nr_pages = 0; /* discards don't have any payload. */
+ nr_pages = 1;
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 677db049f55a..56d46ffb08e1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3777,7 +3777,7 @@ static void floppy_rb0_cb(struct bio *bio, int err)
int drive = cbdata->drive;
if (err) {
- pr_info("floppy: error %d while reading block 0", err);
+ pr_info("floppy: error %d while reading block 0\n", err);
set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
}
complete(&cbdata->complete);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 74abd49fabdc..295f3afbbef5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -39,6 +39,7 @@
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/prefetch.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -2380,6 +2381,8 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
/* Map the scatter list for DMA access */
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
+ prefetch(&port->flags);
+
command->scatter_ents = nents;
/*
@@ -2392,7 +2395,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
fis = command->command;
fis->type = 0x27;
fis->opts = 1 << 7;
- if (rq_data_dir(rq) == READ)
+ if (dma_dir == DMA_FROM_DEVICE)
fis->command = ATA_CMD_FPDMA_READ;
else
fis->command = ATA_CMD_FPDMA_WRITE;
@@ -2412,7 +2415,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
fis->res3 = 0;
fill_command_sg(dd, command, nents);
- if (command->unaligned)
+ if (unlikely(command->unaligned))
fis->device |= 1 << 7;
/* Populate the command header */
@@ -2433,7 +2436,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
* To prevent this command from being issued
* if an internal command is in progress or error handling is active.
*/
- if (port->flags & MTIP_PF_PAUSE_IO) {
+ if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
return;
@@ -3754,7 +3757,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
struct driver_data *dd = hctx->queue->queuedata;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+ if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
return false;
/*
@@ -3776,11 +3779,11 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
int ret;
- if (mtip_check_unal_depth(hctx, rq))
+ if (unlikely(mtip_check_unal_depth(hctx, rq)))
return BLK_MQ_RQ_QUEUE_BUSY;
ret = mtip_submit_request(hctx, rq);
- if (!ret)
+ if (likely(!ret))
return BLK_MQ_RQ_QUEUE_OK;
rq->errors = ret;
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4b9b554234bc..ba1b31ee22ec 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -493,19 +493,19 @@ struct driver_data {
struct workqueue_struct *isr_workq;
- struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
-
atomic_t irq_workers_active;
+ struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
int isr_binding;
struct block_device *bdev;
- int unal_qdepth; /* qdepth of unaligned IO queue */
-
struct list_head online_list; /* linkage for online list */
struct list_head remove_list; /* linkage for removing list */
+
+ int unal_qdepth; /* qdepth of unaligned IO queue */
};
#endif
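
The mtip32xx changes are hot-path micro-optimisations: prefetching port->flags well before it is tested, marking rare submission outcomes with unlikely()/likely(), reordering a short-circuit condition so the cheap test runs first, and regrouping struct driver_data so fields touched together share cache lines. A sketch of the branch-hint half, with made-up types:

#include <linux/compiler.h>
#include <linux/prefetch.h>

struct example_port { unsigned long flags; };
struct example_cmd {
	struct example_port *port;
	bool unaligned;
};

static int example_submit(struct example_cmd *cmd) { return 0; }	/* stub */

/* Sketch: annotate only genuinely skewed branches in the I/O path. */
static int example_queue_rq(struct example_cmd *cmd)
{
	prefetch(&cmd->port->flags);	/* tested further down the path */

	if (unlikely(cmd->unaligned))	/* rare: unaligned I/O */
		return -EBUSY;

	if (likely(!example_submit(cmd)))	/* common case: success */
		return 0;

	return -EIO;
}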
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 77087a29b127..a3b042c4d448 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(home_node, "Home node for the device");
static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
-MODULE_PARM_DESC(use_mq, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)");
+MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int gb = 250;
module_param(gb, int, S_IRUGO);
@@ -227,7 +227,10 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- end_cmd(blk_mq_rq_to_pdu(rq));
+ if (queue_mode == NULL_Q_MQ)
+ end_cmd(blk_mq_rq_to_pdu(rq));
+ else
+ end_cmd(rq->special);
}
static inline void null_handle_cmd(struct nullb_cmd *cmd)
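
The null_blk fix is needed because the softirq completion path is shared between queue modes, and the per-command context lives in a different place for each: embedded in the request PDU under blk-mq, hung off rq->special on the legacy request queue. The patched function, restated with comments (names are the driver's own):

/* Sketch: recover the command context according to the queue mode. */
static void null_softirq_done_fn(struct request *rq)
{
	struct nullb_cmd *cmd;

	if (queue_mode == NULL_Q_MQ)
		cmd = blk_mq_rq_to_pdu(rq);	/* PDU embedded in the request */
	else
		cmd = rq->special;		/* legacy request queue */

	end_cmd(cmd);
}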
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index a842c71dcc21..02351e217165 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -10,10 +10,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/nvme.h>
@@ -46,16 +42,26 @@
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
-#define NVME_Q_DEPTH 1024
+#include <trace/events/block.h>
+
+#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define ADMIN_TIMEOUT (60 * HZ)
-#define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT)
+#define ADMIN_TIMEOUT (admin_timeout * HZ)
+#define IOD_TIMEOUT (retry_time * HZ)
+
+static unsigned char admin_timeout = 60;
+module_param(admin_timeout, byte, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
-unsigned char io_timeout = 30;
-module_param(io_timeout, byte, 0644);
+unsigned char nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+static unsigned char retry_time = 30;
+module_param(retry_time, byte, 0644);
+MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
+
static int nvme_major;
module_param(nvme_major, int, 0);
@@ -67,6 +73,7 @@ static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
static wait_queue_head_t nvme_kthread_wait;
+static struct notifier_block nvme_nb;
static void nvme_reset_failed_dev(struct work_struct *ws);
@@ -199,16 +206,13 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
-#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
-#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
+#define CMD_CTX_ABORT (0x318 + CMD_CTX_BASE)
static void special_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
if (ctx == CMD_CTX_CANCELLED)
return;
- if (ctx == CMD_CTX_FLUSH)
- return;
if (ctx == CMD_CTX_ABORT) {
++nvmeq->dev->abort_limit;
return;
@@ -247,8 +251,9 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
void *ctx;
struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- if (cmdid >= nvmeq->q_depth) {
- *fn = special_completion;
+ if (cmdid >= nvmeq->q_depth || !info[cmdid].fn) {
+ if (fn)
+ *fn = special_completion;
return CMD_CTX_INVALID;
}
if (fn)
@@ -281,9 +286,17 @@ static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
{
+ struct nvme_queue *nvmeq;
unsigned queue_id = get_cpu_var(*dev->io_queue);
+
rcu_read_lock();
- return rcu_dereference(dev->queues[queue_id]);
+ nvmeq = rcu_dereference(dev->queues[queue_id]);
+ if (nvmeq)
+ return nvmeq;
+
+ rcu_read_unlock();
+ put_cpu_var(*dev->io_queue);
+ return NULL;
}
static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -295,8 +308,15 @@ static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
__acquires(RCU)
{
+ struct nvme_queue *nvmeq;
+
rcu_read_lock();
- return rcu_dereference(dev->queues[q_idx]);
+ nvmeq = rcu_dereference(dev->queues[q_idx]);
+ if (nvmeq)
+ return nvmeq;
+
+ rcu_read_unlock();
+ return NULL;
}
static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -387,25 +407,30 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
static void nvme_start_io_acct(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
- const int rw = bio_data_dir(bio);
- int cpu = part_stat_lock();
- part_round_stats(cpu, &disk->part0);
- part_stat_inc(cpu, &disk->part0, ios[rw]);
- part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
- part_inc_in_flight(&disk->part0, rw);
- part_stat_unlock();
+ if (blk_queue_io_stat(disk->queue)) {
+ const int rw = bio_data_dir(bio);
+ int cpu = part_stat_lock();
+ part_round_stats(cpu, &disk->part0);
+ part_stat_inc(cpu, &disk->part0, ios[rw]);
+ part_stat_add(cpu, &disk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_inc_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+ }
}
static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
- const int rw = bio_data_dir(bio);
- unsigned long duration = jiffies - start_time;
- int cpu = part_stat_lock();
- part_stat_add(cpu, &disk->part0, ticks[rw], duration);
- part_round_stats(cpu, &disk->part0);
- part_dec_in_flight(&disk->part0, rw);
- part_stat_unlock();
+ if (blk_queue_io_stat(disk->queue)) {
+ const int rw = bio_data_dir(bio);
+ unsigned long duration = jiffies - start_time;
+ int cpu = part_stat_lock();
+ part_stat_add(cpu, &disk->part0, ticks[rw], duration);
+ part_round_stats(cpu, &disk->part0);
+ part_dec_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+ }
}
static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -414,6 +439,7 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_iod *iod = ctx;
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
+ int error = 0;
if (unlikely(status)) {
if (!(status & NVME_SC_DNR ||
@@ -426,6 +452,7 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
wake_up(&nvmeq->sq_full);
return;
}
+ error = -EIO;
}
if (iod->nents) {
dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
@@ -433,10 +460,9 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
nvme_end_io_acct(bio, iod->start_time);
}
nvme_free_iod(nvmeq->dev, iod);
- if (status)
- bio_endio(bio, -EIO);
- else
- bio_endio(bio, 0);
+
+ trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
+ bio_endio(bio, error);
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -525,6 +551,8 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
if (!split)
return -ENOMEM;
+ trace_block_split(bdev_get_queue(bio->bi_bdev), bio,
+ split->bi_iter.bi_sector);
bio_chain(split, bio);
if (!waitqueue_active(&nvmeq->sq_full))
@@ -627,16 +655,6 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return 0;
}
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
-{
- int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
- special_completion, NVME_IO_TIMEOUT);
- if (unlikely(cmdid < 0))
- return cmdid;
-
- return nvme_submit_flush(nvmeq, ns, cmdid);
-}
-
static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
{
struct bio *bio = iod->private;
@@ -652,7 +670,7 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
if (bio->bi_rw & REQ_DISCARD)
return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
- if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
+ if (bio->bi_rw & REQ_FLUSH)
return nvme_submit_flush(nvmeq, ns, cmdid);
control = 0;
@@ -686,6 +704,26 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
return 0;
}
+static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
+{
+ struct bio *split = bio_clone(bio, GFP_ATOMIC);
+ if (!split)
+ return -ENOMEM;
+
+ split->bi_iter.bi_size = 0;
+ split->bi_phys_segments = 0;
+ bio->bi_rw &= ~REQ_FLUSH;
+ bio_chain(split, bio);
+
+ if (!waitqueue_active(&nvmeq->sq_full))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, split);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ wake_up_process(nvme_thread);
+
+ return 0;
+}
+
/*
* Called with local interrupts disabled and the q_lock held. May not sleep.
*/
@@ -696,11 +734,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int psegs = bio_phys_segments(ns->queue, bio);
int result;
- if ((bio->bi_rw & REQ_FLUSH) && psegs) {
- result = nvme_submit_flush_data(nvmeq, ns);
- if (result)
- return result;
- }
+ if ((bio->bi_rw & REQ_FLUSH) && psegs)
+ return nvme_split_flush_data(nvmeq, bio);
iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
if (!iod)
@@ -795,7 +830,6 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
int result = -EBUSY;
if (!nvmeq) {
- put_nvmeq(NULL);
bio_endio(bio, -EIO);
return;
}
@@ -870,10 +904,8 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
struct nvme_queue *nvmeq;
nvmeq = lock_nvmeq(dev, q_idx);
- if (!nvmeq) {
- unlock_nvmeq(nvmeq);
+ if (!nvmeq)
return -ENODEV;
- }
cmdinfo.task = current;
cmdinfo.status = -EINTR;
@@ -898,9 +930,10 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
if (cmdinfo.status == -EINTR) {
nvmeq = lock_nvmeq(dev, q_idx);
- if (nvmeq)
+ if (nvmeq) {
nvme_abort_command(nvmeq, cmdid);
- unlock_nvmeq(nvmeq);
+ unlock_nvmeq(nvmeq);
+ }
return -EINTR;
}
@@ -1358,7 +1391,8 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
return -EINTR;
if (time_after(jiffies, timeout)) {
dev_err(&dev->pci_dev->dev,
- "Device not ready; aborting initialisation\n");
+ "Device not ready; aborting %s\n", enabled ?
+ "initialisation" : "reset");
return -ENODEV;
}
}
@@ -1481,7 +1515,11 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
goto put_pages;
}
+ err = -ENOMEM;
iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+ if (!iod)
+ goto put_pages;
+
sg = iod->sg;
sg_init_table(sg, count);
for (i = 0; i < count; i++) {
@@ -1494,7 +1532,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
sg_mark_end(&sg[i - 1]);
iod->nents = count;
- err = -ENOMEM;
nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (!nents)
@@ -1894,6 +1931,8 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
if (dev->max_hw_sectors)
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+ if (dev->vwc & NVME_CTRL_VWC_PRESENT)
+ blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
disk->major = nvme_major;
disk->first_minor = 0;
@@ -2062,8 +2101,13 @@ static int set_queue_count(struct nvme_dev *dev, int count)
status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result);
- if (status)
- return status < 0 ? -EIO : -EBUSY;
+ if (status < 0)
+ return status;
+ if (status > 0) {
+ dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
+ status);
+ return -EBUSY;
+ }
return min(result & 0xffff, result >> 16) + 1;
}
@@ -2072,14 +2116,25 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
+static void nvme_cpu_workfn(struct work_struct *work)
+{
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, cpu_work);
+ if (dev->initialized)
+ nvme_assign_io_queues(dev);
+}
+
static int nvme_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+ struct nvme_dev *dev;
+
switch (action) {
case CPU_ONLINE:
case CPU_DEAD:
- nvme_assign_io_queues(dev);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node)
+ schedule_work(&dev->cpu_work);
+ spin_unlock(&dev_list_lock);
break;
}
return NOTIFY_OK;
@@ -2148,11 +2203,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
nvme_free_queues(dev, nr_io_queues + 1);
nvme_assign_io_queues(dev);
- dev->nb.notifier_call = &nvme_cpu_notify;
- result = register_hotcpu_notifier(&dev->nb);
- if (result)
- goto free_queues;
-
return 0;
free_queues:
@@ -2184,6 +2234,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
+ dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
res = -EIO;
goto out;
}
@@ -2192,6 +2243,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1;
+ dev->vwc = ctrl->vwc;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
@@ -2450,8 +2502,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
int i;
dev->initialized = 0;
- unregister_hotcpu_notifier(&dev->nb);
-
nvme_dev_list_remove(dev);
if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
@@ -2722,6 +2772,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->reset_workfn = nvme_reset_failed_dev;
INIT_WORK(&dev->reset_work, nvme_reset_workfn);
+ INIT_WORK(&dev->cpu_work, nvme_cpu_workfn);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
@@ -2801,6 +2852,7 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
flush_work(&dev->reset_work);
+ flush_work(&dev->cpu_work);
misc_deregister(&dev->miscdev);
nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
@@ -2889,11 +2941,18 @@ static int __init nvme_init(void)
else if (result > 0)
nvme_major = result;
- result = pci_register_driver(&nvme_driver);
+ nvme_nb.notifier_call = &nvme_cpu_notify;
+ result = register_hotcpu_notifier(&nvme_nb);
if (result)
goto unregister_blkdev;
+
+ result = pci_register_driver(&nvme_driver);
+ if (result)
+ goto unregister_hotcpu;
return 0;
+ unregister_hotcpu:
+ unregister_hotcpu_notifier(&nvme_nb);
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
kill_workq:
@@ -2904,9 +2963,11 @@ static int __init nvme_init(void)
static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
+ unregister_hotcpu_notifier(&nvme_nb);
unregister_blkdev(nvme_major, "nvme");
destroy_workqueue(nvme_workq);
BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
+ _nvme_check_size();
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
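
The largest structural change in nvme-core above is to CPU-hotplug handling: instead of each device registering its own notifier during queue setup and unregistering it in shutdown, a single notifier registered once at module init walks the device list and defers the actual queue reassignment to per-device work, so notifier registration no longer has to be torn down on every device shutdown. A sketch of that fan-out shape, with illustrative names throughout:

#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_dev {
	struct list_head node;
	struct work_struct cpu_work;	/* runs the sleeping rebalance */
};

static LIST_HEAD(example_dev_list);
static DEFINE_SPINLOCK(example_dev_lock);

/* Sketch: one module-level notifier fanning out to per-device work. */
static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	struct example_dev *dev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DEAD:
		spin_lock(&example_dev_lock);
		list_for_each_entry(dev, &example_dev_list, node)
			schedule_work(&dev->cpu_work);
		spin_unlock(&example_dev_lock);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_cpu_notify,
};
/* module init: register_hotcpu_notifier(&example_nb);
 * module exit: unregister_hotcpu_notifier(&example_nb); */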
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 2c3f5be06da1..a4cd6d691c63 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1,6 +1,6 @@
/*
* NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -10,10 +10,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
@@ -243,8 +239,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define READ_CAP_16_RESP_SIZE 32
/* NVMe Namespace and Command Defines */
-#define NVME_GET_SMART_LOG_PAGE 0x02
-#define NVME_GET_FEAT_TEMP_THRESH 0x04
#define BYTES_TO_DWORDS 4
#define NVME_MAX_FIRMWARE_SLOT 7
@@ -686,6 +680,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
u8 resp_data_format = 0x02;
u8 protect;
u8 cmdque = 0x01 << 1;
+ u8 fw_offset = sizeof(dev->firmware_rev);
mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
&dma_addr, GFP_KERNEL);
@@ -721,7 +716,11 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
strncpy(&inq_response[8], "NVMe ", 8);
strncpy(&inq_response[16], dev->model, 16);
- strncpy(&inq_response[32], dev->firmware_rev, 4);
+
+ while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
+ fw_offset--;
+ fw_offset -= 4;
+ strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -1018,8 +1017,8 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
c.common.opcode = nvme_admin_get_log_page;
c.common.nsid = cpu_to_le32(0xFFFFFFFF);
c.common.prp1 = cpu_to_le64(dma_addr);
- c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
- BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
res = nvme_submit_admin_cmd(dev, &c, NULL);
if (res != NVME_SC_SUCCESS) {
temp_c = LOG_TEMP_UNKNOWN;
@@ -1086,8 +1085,8 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.common.opcode = nvme_admin_get_log_page;
c.common.nsid = cpu_to_le32(0xFFFFFFFF);
c.common.prp1 = cpu_to_le64(dma_addr);
- c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
- BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
res = nvme_submit_admin_cmd(dev, &c, NULL);
if (res != NVME_SC_SUCCESS) {
temp_c_cur = LOG_TEMP_UNKNOWN;
@@ -1477,7 +1476,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
goto out_dma;
}
id_ctrl = mem;
- lowest_pow_st = id_ctrl->npss - 1;
+ lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
switch (pc) {
case NVME_POWER_STATE_START_VALID:
@@ -1494,20 +1493,19 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
break;
case NVME_POWER_STATE_IDLE:
/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
- /* min of desired state and (lps-1) because lps is STOP */
if (pcmod == 0x0)
- ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
+ ps_desired = POWER_STATE_1;
else if (pcmod == 0x1)
- ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
+ ps_desired = POWER_STATE_2;
else if (pcmod == 0x2)
- ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
+ ps_desired = POWER_STATE_3;
break;
case NVME_POWER_STATE_STANDBY:
/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
if (pcmod == 0x0)
- ps_desired = max(0, (lowest_pow_st - 2));
+ ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
else if (pcmod == 0x1)
- ps_desired = max(0, (lowest_pow_st - 1));
+ ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
break;
case NVME_POWER_STATE_LU_CONTROL:
default:
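
Two nvme-scsi translation fixes sit above: the SMART log-page length in CDW10 is now encoded as (dwords - 1), matching the spec's zero-based NUMD field, and the INQUIRY revision bytes now come from the end of the firmware string with trailing padding skipped. The trimming logic, sketched standalone:

#include <linux/string.h>

/* Sketch: copy the last four non-blank characters of an 8-byte,
 * space-padded firmware revision (mirrors the nvme-scsi fix). */
static void example_fw_rev(const char fw[8], char out[4])
{
	int end = 8;

	while (end > 4 && fw[end - 1] == ' ')
		end--;				/* skip trailing spaces */
	memcpy(out, fw + end - 4, 4);		/* last four meaningful chars */
}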
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ef166ad2dbad..758ac442c5b5 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,6 +704,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
WRITE : READ, __GFP_WAIT);
+ blk_rq_set_block_pc(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,7 +717,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
if (cgc->quiet)
rq->cmd_flags |= REQ_QUIET;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4c95b503b09e..b2c98c1bc037 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -541,7 +541,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
return -ENOENT;
(void) get_device(&rbd_dev->dev);
- set_device_ro(bdev, rbd_dev->mapping.read_only);
return 0;
}
@@ -559,10 +558,76 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
put_device(&rbd_dev->dev);
}
+static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
+{
+ int ret = 0;
+ int val;
+ bool ro;
+ bool ro_changed = false;
+
+ /* get_user() may sleep, so call it before taking rbd_dev->lock */
+ if (get_user(val, (int __user *)(arg)))
+ return -EFAULT;
+
+ ro = val ? true : false;
+ /* Snapshots don't allow writes */
+ if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
+ return -EROFS;
+
+ spin_lock_irq(&rbd_dev->lock);
+ /* prevent others from opening this device */
+ if (rbd_dev->open_count > 1) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (rbd_dev->mapping.read_only != ro) {
+ rbd_dev->mapping.read_only = ro;
+ ro_changed = true;
+ }
+
+out:
+ spin_unlock_irq(&rbd_dev->lock);
+ /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
+ if (ret == 0 && ro_changed)
+ set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
+
+ return ret;
+}
+
+static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case BLKROSET:
+ ret = rbd_ioctl_set_ro(rbd_dev, arg);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return rbd_ioctl(bdev, mode, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
+ .ioctl = rbd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = rbd_compat_ioctl,
+#endif
};
/*
@@ -1366,6 +1431,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
+static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
+{
+ struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
+
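+ /* an object that straddles the overlap boundary still overlaps the parent */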
+ return obj_request->img_offset <
+ round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
+}
+
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -1382,6 +1455,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
+static void rbd_img_request_get(struct rbd_img_request *img_request)
+{
+ dout("%s: img %p (was %d)\n", __func__, img_request,
+ atomic_read(&img_request->kref.refcount));
+ kref_get(&img_request->kref);
+}
+
static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
@@ -2142,6 +2222,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
img_request->next_completion = which;
out:
spin_unlock_irq(&img_request->completion_lock);
+ rbd_img_request_put(img_request);
if (!more)
rbd_img_request_complete(img_request);
@@ -2242,6 +2323,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
goto out_unwind;
obj_request->osd_req = osd_req;
obj_request->callback = rbd_img_obj_callback;
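+ /* reference is dropped in rbd_img_obj_callback() */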
+ rbd_img_request_get(img_request);
if (write_request) {
osd_req_op_alloc_hint_init(osd_req, which,
@@ -2674,7 +2756,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
*/
if (!img_request_write_test(img_request) ||
!img_request_layered_test(img_request) ||
- rbd_dev->parent_overlap <= obj_request->img_offset ||
+ !obj_request_overlaps_parent(obj_request) ||
((known = obj_request_known_test(obj_request)) &&
obj_request_exists_test(obj_request))) {
@@ -2872,56 +2954,55 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
}
/*
- * Request sync osd watch/unwatch. The value of "start" determines
- * whether a watch request is being initiated or torn down.
+ * Initiate a watch request, synchronously.
*/
-static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
int ret;
- rbd_assert(start ^ !!rbd_dev->watch_event);
- rbd_assert(start ^ !!rbd_dev->watch_request);
+ rbd_assert(!rbd_dev->watch_event);
+ rbd_assert(!rbd_dev->watch_request);
- if (start) {
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
- &rbd_dev->watch_event);
- if (ret < 0)
- return ret;
- rbd_assert(rbd_dev->watch_event != NULL);
- }
+ ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
+ &rbd_dev->watch_event);
+ if (ret < 0)
+ return ret;
+
+ rbd_assert(rbd_dev->watch_event);
- ret = -ENOMEM;
obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
+ OBJ_REQUEST_NODATA);
+ if (!obj_request) {
+ ret = -ENOMEM;
goto out_cancel;
+ }
obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
obj_request);
- if (!obj_request->osd_req)
- goto out_cancel;
+ if (!obj_request->osd_req) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
- if (start)
- ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
- else
- ceph_osdc_unregister_linger_request(osdc,
- rbd_dev->watch_request->osd_req);
+ ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
+ rbd_dev->watch_event->cookie, 0, 1);
rbd_osd_req_format_write(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
- goto out_cancel;
+ goto out_linger;
+
ret = rbd_obj_request_wait(obj_request);
if (ret)
- goto out_cancel;
+ goto out_linger;
+
ret = obj_request->result;
if (ret)
- goto out_cancel;
+ goto out_linger;
/*
* A watch request is set to linger, so the underlying osd
@@ -2931,36 +3012,84 @@ static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
* it. We'll drop that reference (below) after we've
* unregistered it.
*/
- if (start) {
- rbd_dev->watch_request = obj_request;
+ rbd_dev->watch_request = obj_request;
- return 0;
+ return 0;
+
+out_linger:
+ ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
+out_put:
+ rbd_obj_request_put(obj_request);
+out_cancel:
+ ceph_osdc_cancel_event(rbd_dev->watch_event);
+ rbd_dev->watch_event = NULL;
+
+ return ret;
+}
+
+/*
+ * Tear down a watch request, synchronously.
+ */
+static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_obj_request *obj_request;
+ int ret;
+
+ rbd_assert(rbd_dev->watch_event);
+ rbd_assert(rbd_dev->watch_request);
+
+ obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+ OBJ_REQUEST_NODATA);
+ if (!obj_request) {
+ ret = -ENOMEM;
+ goto out_cancel;
+ }
+
+ obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
+ obj_request);
+ if (!obj_request->osd_req) {
+ ret = -ENOMEM;
+ goto out_put;
}
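+ /* flag 0 on the watch op requests teardown rather than registration */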
+ osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
+ rbd_dev->watch_event->cookie, 0, 0);
+ rbd_osd_req_format_write(obj_request);
+
+ ret = rbd_obj_request_submit(osdc, obj_request);
+ if (ret)
+ goto out_put;
+
+ ret = rbd_obj_request_wait(obj_request);
+ if (ret)
+ goto out_put;
+
+ ret = obj_request->result;
+ if (ret)
+ goto out_put;
+
/* We have successfully torn down the watch request */
+ ceph_osdc_unregister_linger_request(osdc,
+ rbd_dev->watch_request->osd_req);
rbd_obj_request_put(rbd_dev->watch_request);
rbd_dev->watch_request = NULL;
+
+out_put:
+ rbd_obj_request_put(obj_request);
out_cancel:
- /* Cancel the event if we're tearing down, or on error */
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
- if (obj_request)
- rbd_obj_request_put(obj_request);
return ret;
}
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
-{
- return __rbd_dev_header_watch_sync(rbd_dev, true);
-}
-
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
int ret;
- ret = __rbd_dev_header_watch_sync(rbd_dev, false);
+ ret = __rbd_dev_header_unwatch_sync(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
ret);
@@ -3058,7 +3187,6 @@ static void rbd_request_fn(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct rbd_device *rbd_dev = q->queuedata;
- bool read_only = rbd_dev->mapping.read_only;
struct request *rq;
int result;
@@ -3094,7 +3222,7 @@ static void rbd_request_fn(struct request_queue *q)
if (write_request) {
result = -EROFS;
- if (read_only)
+ if (rbd_dev->mapping.read_only)
goto end_request;
rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
}
@@ -4683,6 +4811,38 @@ out_err:
}
/*
+ * Return pool id (>= 0) or a negative error code.
+ */
+static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
+{
+ u64 newest_epoch;
+ unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
+ int tries = 0;
+ int ret;
+
+again:
+ ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
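+ /* the pool may only exist in a newer osdmap; fetch it and retry once */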
+ if (ret == -ENOENT && tries++ < 1) {
+ ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
+ &newest_epoch);
+ if (ret < 0)
+ return ret;
+
+ if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
+ ceph_monc_request_next_osdmap(&rbdc->client->monc);
+ (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
+ newest_epoch, timeout);
+ goto again;
+ } else {
+ /* the osdmap we have is new enough */
+ return -ENOENT;
+ }
+ }
+
+ return ret;
+}
+
+/*
* An rbd format 2 image has a unique identifier, distinct from the
* name given to it by the user. Internally, that identifier is
* what's used to specify the names of objects related to the image.
@@ -4752,7 +4912,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
image_id = ceph_extract_encoded_string(&p, p + ret,
NULL, GFP_NOIO);
- ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
+ ret = PTR_ERR_OR_ZERO(image_id);
if (!ret)
rbd_dev->image_format = 2;
} else {
@@ -4907,6 +5067,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
if (ret)
goto err_out_disk;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+ set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
ret = rbd_bus_add_dev(rbd_dev);
if (ret)
@@ -5053,7 +5214,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_options *rbd_opts = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
- struct ceph_osd_client *osdc;
bool read_only;
int rc = -ENOMEM;
@@ -5075,8 +5235,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
}
/* pick the pool */
- osdc = &rbdc->client->osdc;
- rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
+ rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
if (rc < 0)
goto err_out_client;
spec->pool_id = (u64)rc;
@@ -5387,6 +5546,7 @@ err_out_slab:
static void __exit rbd_exit(void)
{
+ ida_destroy(&rbd_dev_id_ida);
rbd_sysfs_cleanup();
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 48eccb350180..089e72cd37be 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,8 +622,10 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- if (reset_capacity)
+ if (reset_capacity) {
set_capacity(zram->disk, 0);
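+ /* revalidate so the block layer picks up the new capacity */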
+ revalidate_disk(zram->disk);
+ }
up_write(&zram->init_lock);
}
@@ -664,6 +666,7 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ revalidate_disk(zram->disk);
up_write(&zram->init_lock);
return len;
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a83b57e57b63..f98380648cb3 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -193,9 +193,10 @@ static int ath3k_load_firmware(struct usb_device *udev,
sent += 20;
count -= 20;
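+ /* the bulk out pipe never changes; compute it once before the loop */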
+ pipe = usb_sndbulkpipe(udev, 0x02);
+
while (count) {
size = min_t(uint, count, BULK_SIZE);
- pipe = usb_sndbulkpipe(udev, 0x02);
memcpy(send_buf, firmware->data + sent, size);
err = usb_bulk_msg(udev, pipe, send_buf, size,
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 7399303d7d99..dc79f88f8717 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -59,6 +59,8 @@ struct btmrvl_device {
};
struct btmrvl_adapter {
+ void *hw_regs_buf;
+ u8 *hw_regs;
u32 int_count;
struct sk_buff_head tx_queue;
u8 psmode;
@@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 2c4997ce2484..e9dbddb0b8f1 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -24,6 +24,7 @@
#include <net/bluetooth/hci_core.h>
#include "btmrvl_drv.h"
+#include "btmrvl_sdio.h"
#define VERSION "1.0"
@@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
return 0;
}
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
{
int ret;
@@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
static void btmrvl_init_adapter(struct btmrvl_private *priv)
{
+ int buf_size;
+
skb_queue_head_init(&priv->adapter->tx_queue);
priv->adapter->ps_state = PS_AWAKE;
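+ /* over-allocate so hw_regs can be aligned to BTSDIO_DMA_ALIGN */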
+ buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
+ priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!priv->adapter->hw_regs_buf) {
+ priv->adapter->hw_regs = NULL;
+ BT_ERR("Unable to allocate buffer for hw_regs.");
+ } else {
+ priv->adapter->hw_regs =
+ (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
+ BTSDIO_DMA_ALIGN);
+ BT_DBG("hw_regs_buf=%p hw_regs=%p",
+ priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
+ }
+
init_waitqueue_head(&priv->adapter->cmd_wait_q);
}
@@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
{
skb_queue_purge(&priv->adapter->tx_queue);
+ kfree(priv->adapter->hw_regs_buf);
kfree(priv->adapter);
priv->adapter = NULL;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 1b52c9f5230d..9dedca516ff5 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_0 = 0x00,
.io_port_1 = 0x01,
.io_port_2 = 0x02,
+ .int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
@@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_0 = 0x78,
.io_port_1 = 0x79,
.io_port_2 = 0x7a,
+ .int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
.io_port_0 = 0xd8,
.io_port_1 = 0xd9,
.io_port_2 = 0xda,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x01,
+ .card_misc_cfg = 0xcc,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -667,46 +672,78 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
return 0;
}
-static void btmrvl_sdio_interrupt(struct sdio_func *func)
+static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
{
- struct btmrvl_private *priv;
- struct btmrvl_sdio_card *card;
- ulong flags;
- u8 ireg = 0;
+ struct btmrvl_adapter *adapter = card->priv->adapter;
int ret;
- card = sdio_get_drvdata(func);
- if (!card || !card->priv) {
- BT_ERR("sbi_interrupt(%p) card or priv is "
- "NULL, card=%p\n", func, card);
- return;
+ ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
+ if (ret) {
+ BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
+ return ret;
}
- priv = card->priv;
+ *ireg = adapter->hw_regs[card->reg->host_intstatus];
+ BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
+
+ return 0;
+}
- ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
+static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
+{
+ int ret;
+
+ *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
if (ret) {
- BT_ERR("sdio_readb: read int status register failed");
- return;
+ BT_ERR("sdio_readb: read int status failed: %d", ret);
+ return ret;
}
- if (ireg != 0) {
+ if (*ireg) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
* Clear the interrupt status register and re-enable the
* interrupt.
*/
- BT_DBG("ireg = 0x%x", ireg);
+ BT_DBG("int_status = 0x%x", *ireg);
- sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
- UP_LD_HOST_INT_STATUS),
- card->reg->host_intstatus, &ret);
+ sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
+ UP_LD_HOST_INT_STATUS),
+ card->reg->host_intstatus, &ret);
if (ret) {
- BT_ERR("sdio_writeb: clear int status register failed");
- return;
+ BT_ERR("sdio_writeb: clear int status failed: %d", ret);
+ return ret;
}
}
+ return 0;
+}
+
+static void btmrvl_sdio_interrupt(struct sdio_func *func)
+{
+ struct btmrvl_private *priv;
+ struct btmrvl_sdio_card *card;
+ ulong flags;
+ u8 ireg = 0;
+ int ret;
+
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("sbi_interrupt(%p) card or priv is "
+ "NULL, card=%p\n", func, card);
+ return;
+ }
+
+ priv = card->priv;
+
+ if (card->reg->int_read_to_clear)
+ ret = btmrvl_sdio_read_to_clear(card, &ireg);
+ else
+ ret = btmrvl_sdio_write_to_clear(card, &ireg);
+
+ if (ret)
+ return;
+
spin_lock_irqsave(&priv->driver_lock, flags);
sdio_ireg |= ireg;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
+ if (card->reg->int_read_to_clear) {
+ reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+
+ reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
+ if (ret < 0) {
+ ret = -EIO;
+ goto release_irq;
+ }
+ }
+
sdio_set_drvdata(func, card);
sdio_release_host(func);
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 43d35a609ca9..d4dd3b0fa53d 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
u8 io_port_0;
u8 io_port_1;
u8 io_port_2;
+ bool int_read_to_clear;
+ u8 host_int_rsr;
+ u8 card_misc_cfg;
};
struct btmrvl_sdio_card {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a7dfbf9a3afb..a1c80b0c7663 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_WRONG_SCO_MTU 0x40
#define BTUSB_ATH3012 0x80
#define BTUSB_INTEL 0x100
+#define BTUSB_BCM_PATCHRAM 0x200
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -111,7 +112,8 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
/* Broadcom devices with vendor specific id */
- { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Belkin F8065bf - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
@@ -1381,6 +1383,154 @@ exit_mfg_deactivate:
return 0;
}
+static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct usb_device *udev = data->udev;
+ char fw_name[64];
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ const struct hci_command_hdr *cmd;
+ const u8 *cmd_param;
+ u16 opcode;
+ struct sk_buff *skb;
+ struct hci_rp_read_local_version *ver;
+ long ret;
+
+ snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd",
+ udev->product ? udev->product : "BCM",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ ret = request_firmware(&fw, fw_name, &hdev->dev);
+ if (ret < 0) {
+ BT_INFO("%s: BCM: patch %s not found", hdev->name,
+ fw_name);
+ return 0;
+ }
+
+ /* Reset */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
+ goto done;
+ }
+ kfree_skb(skb);
+
+ /* Read Local Version Info */
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+ hdev->name, ret);
+ goto done;
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ ret = -EIO;
+ goto done;
+ }
+
+ ver = (struct hci_rp_read_local_version *) skb->data;
+ BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+ "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
+ ver->lmp_ver, ver->lmp_subver);
+ kfree_skb(skb);
+
+ /* Start Download */
+ skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: BCM: Download Minidrv command failed (%ld)",
+ hdev->name, ret);
+ goto reset_fw;
+ }
+ kfree_skb(skb);
+
+ /* 50 msec delay after Download Minidrv completes */
+ msleep(50);
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
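+ /* a .hcd patch is a stream of HCI commands: a header followed by plen parameter bytes */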
+ while (fw_size >= sizeof(*cmd)) {
+ cmd = (struct hci_command_hdr *) fw_ptr;
+ fw_ptr += sizeof(*cmd);
+ fw_size -= sizeof(*cmd);
+
+ if (fw_size < cmd->plen) {
+ BT_ERR("%s: BCM: patch %s is corrupted",
+ hdev->name, fw_name);
+ ret = -EINVAL;
+ goto reset_fw;
+ }
+
+ cmd_param = fw_ptr;
+ fw_ptr += cmd->plen;
+ fw_size -= cmd->plen;
+
+ opcode = le16_to_cpu(cmd->opcode);
+
+ skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: BCM: patch command %04x failed (%ld)",
+ hdev->name, opcode, ret);
+ goto reset_fw;
+ }
+ kfree_skb(skb);
+ }
+
+ /* 250 msec delay after Launch Ram completes */
+ msleep(250);
+
+reset_fw:
+ /* Reset */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret);
+ goto done;
+ }
+ kfree_skb(skb);
+
+ /* Read Local Version Info */
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+ hdev->name, ret);
+ goto done;
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ ret = -EIO;
+ goto done;
+ }
+
+ ver = (struct hci_rp_read_local_version *) skb->data;
+ BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+ "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev,
+ ver->lmp_ver, ver->lmp_subver);
+ kfree_skb(skb);
+
+done:
+ release_firmware(fw);
+
+ return ret;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -1486,6 +1636,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
+ if (id->driver_info & BTUSB_BCM_PATCHRAM)
+ hdev->setup = btusb_setup_bcm_patchram;
+
if (id->driver_info & BTUSB_INTEL)
hdev->setup = btusb_setup_intel;
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 7048a583fe51..66db9a803373 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -55,13 +55,6 @@ struct h4_struct {
struct sk_buff_head txq;
};
-/* H4 receiver States */
-#define H4_W4_PACKET_TYPE 0
-#define H4_W4_EVENT_HDR 1
-#define H4_W4_ACL_HDR 2
-#define H4_W4_SCO_HDR 3
-#define H4_W4_DATA 4
-
/* Initialize protocol */
static int h4_open(struct hci_uart *hu)
{
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a118ec1650fa..1f37d9870e7a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -45,7 +45,7 @@ config OMAP_INTERCONNECT
config ARM_CCI
bool "ARM CCI driver support"
- depends on ARM
+ depends on ARM && OF && CPU_V7
help
Driver supporting the CCI cache coherent interconnect for ARM
platforms.
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 2a44767891f5..898b84bba28a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2184,6 +2184,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
ret = -ENOMEM;
break;
}
+ blk_rq_set_block_pc(rq);
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {
@@ -2203,7 +2204,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
rq->cmd[9] = 0xf8;
rq->cmd_len = 12;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->timeout = 60 * HZ;
bio = rq->bio;
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 2ce0e225e58c..f3e71501de54 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -25,88 +25,115 @@
#include <linux/virtio_rng.h>
#include <linux/module.h>
-static struct virtqueue *vq;
-static unsigned int data_avail;
-static DECLARE_COMPLETION(have_data);
-static bool busy;
+static DEFINE_IDA(rng_index_ida);
+
+struct virtrng_info {
+ struct virtio_device *vdev;
+ struct hwrng hwrng;
+ struct virtqueue *vq;
+ unsigned int data_avail;
+ struct completion have_data;
+ bool busy;
+ char name[25];
+ int index;
+};
static void random_recv_done(struct virtqueue *vq)
{
+ struct virtrng_info *vi = vq->vdev->priv;
+
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!virtqueue_get_buf(vq, &data_avail))
+ if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
return;
- complete(&have_data);
+ complete(&vi->have_data);
}
/* The host will fill any buffer we give it with sweet, sweet randomness. */
-static void register_buffer(u8 *buf, size_t size)
+static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
{
struct scatterlist sg;
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
+ virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
- virtqueue_kick(vq);
+ virtqueue_kick(vi->vq);
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
int ret;
+ struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
- if (!busy) {
- busy = true;
- init_completion(&have_data);
- register_buffer(buf, size);
+ if (!vi->busy) {
+ vi->busy = true;
+ init_completion(&vi->have_data);
+ register_buffer(vi, buf, size);
}
if (!wait)
return 0;
- ret = wait_for_completion_killable(&have_data);
+ ret = wait_for_completion_killable(&vi->have_data);
if (ret < 0)
return ret;
- busy = false;
+ vi->busy = false;
- return data_avail;
+ return vi->data_avail;
}
static void virtio_cleanup(struct hwrng *rng)
{
- if (busy)
- wait_for_completion(&have_data);
-}
+ struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
-
-static struct hwrng virtio_hwrng = {
- .name = "virtio",
- .cleanup = virtio_cleanup,
- .read = virtio_read,
-};
+ if (vi->busy)
+ wait_for_completion(&vi->have_data);
+}
static int probe_common(struct virtio_device *vdev)
{
- int err;
+ int err, index;
+ struct virtrng_info *vi = NULL;
+
+ vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
- if (vq) {
- /* We only support one device for now */
- return -EBUSY;
+ vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL);
+ if (index < 0) {
+ kfree(vi);
+ return index;
}
+ sprintf(vi->name, "virtio_rng.%d", index);
+ init_completion(&vi->have_data);
+
+ vi->hwrng = (struct hwrng) {
+ .read = virtio_read,
+ .cleanup = virtio_cleanup,
+ .priv = (unsigned long)vi,
+ .name = vi->name,
+ };
+ vdev->priv = vi;
+
/* We expect a single virtqueue. */
- vq = virtio_find_single_vq(vdev, random_recv_done, "input");
- if (IS_ERR(vq)) {
- err = PTR_ERR(vq);
- vq = NULL;
+ vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input");
+ if (IS_ERR(vi->vq)) {
+ err = PTR_ERR(vi->vq);
+ vi->vq = NULL;
+ kfree(vi);
+ ida_simple_remove(&rng_index_ida, index);
return err;
}
- err = hwrng_register(&virtio_hwrng);
+ err = hwrng_register(&vi->hwrng);
if (err) {
vdev->config->del_vqs(vdev);
- vq = NULL;
+ vi->vq = NULL;
+ kfree(vi);
+ ida_simple_remove(&rng_index_ida, index);
return err;
}
@@ -115,11 +142,13 @@ static int probe_common(struct virtio_device *vdev)
static void remove_common(struct virtio_device *vdev)
{
+ struct virtrng_info *vi = vdev->priv;
vdev->config->reset(vdev);
- busy = false;
- hwrng_unregister(&virtio_hwrng);
+ vi->busy = false;
+ hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);
- vq = NULL;
+ ida_simple_remove(&rng_index_ida, vi->index);
+ kfree(vi);
}
static int virtrng_probe(struct virtio_device *vdev)
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index d915707d2ba1..93dcad0c1cbe 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs)
if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(old_mask, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpumask_of(0));
+ rc = set_cpus_allowed_ptr(current, cpumask_of(0));
+ if (rc)
+ goto out;
if (smp_processor_id() != 0) {
rc = -EBUSY;
goto out;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ad71ef2cd59..0a7ac0a7b252 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -980,7 +980,6 @@ static void push_to_pool(struct work_struct *work)
static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
- int have_bytes;
int entropy_count, orig;
size_t ibytes;
@@ -989,17 +988,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
/* Can we pull enough? */
retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
ibytes = nbytes;
/* If limited, never pull more than available */
- if (r->limit)
- ibytes = min_t(size_t, ibytes, have_bytes - reserved);
+ if (r->limit) {
+ int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+
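+ /* never let a pull dip into the reserved entropy */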
+ if ((have_bytes -= reserved) < 0)
+ have_bytes = 0;
+ ibytes = min_t(size_t, ibytes, have_bytes);
+ }
if (ibytes < min)
ibytes = 0;
- if (have_bytes >= ibytes + reserved)
- entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
- else
- entropy_count = reserved << (ENTROPY_SHIFT + 3);
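+ /* clamp at zero so the subtraction can't drive the pool count negative */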
+ if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
+ entropy_count = 0;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 6e8d65e9b1d3..0102dc788608 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -284,10 +284,10 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
#endif
static const struct file_operations raw_fops = {
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .write = do_sync_write,
- .aio_write = blkdev_aio_write,
+ .read = new_sync_read,
+ .read_iter = generic_file_read_iter,
+ .write = new_sync_write,
+ .write_iter = blkdev_write_iter,
.fsync = blkdev_fsync,
.open = raw_open,
.release = raw_release,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 9b7b5859a420..3757e9e72d37 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -230,16 +230,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
goto err_reg;
}
- s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
- sizeof(struct clk_lookup), GFP_KERNEL);
+ s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
+ s2mps11_name(s2mps11_clk), NULL);
if (!s2mps11_clk->lookup) {
ret = -ENOMEM;
goto err_lup;
}
- s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
- s2mps11_clk->lookup->clk = s2mps11_clk->clk;
-
clkdev_add(s2mps11_clk->lookup);
}
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 12f3c0b64fcd..4c449b3170f6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1209,7 +1209,7 @@ static struct clk_branch rot_clk = {
static u8 mmcc_pxo_hdmi_map[] = {
[P_PXO] = 0,
- [P_HDMI_PLL] = 2,
+ [P_HDMI_PLL] = 3,
};
static const char *mmcc_pxo_hdmi[] = {
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4f150c9dd38c..7f4a473a7ad7 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -925,21 +925,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
0, 0),
GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
- GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp",
- E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
- E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
- E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
- E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
+ GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp",
E4X12_GATE_IP_ISP, 0, 0, 0),
- GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp",
+ GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre",
E4X12_GATE_IP_ISP, 1, 0, 0),
- GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp",
+ GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre",
E4X12_GATE_IP_ISP, 2, 0, 0),
- GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp",
+ GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp",
E4X12_GATE_IP_ISP, 3, 0, 0),
GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 1fad4c5e3f5d..184f64293b26 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
- GATE_IP_DISP1, 2, 0, 0),
+ GATE_IP_DISP1, 9, 0, 0),
GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
GATE_IP_DISP1, 8, 0, 0),
GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 9d7d7eed03fd..a4e6cc782e5c 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -631,7 +631,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
SRC_TOP4, 16, 1),
MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
- MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1),
+ MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p,
+ SRC_TOP4, 28, 1),
MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
SRC_TOP5, 0, 1),
@@ -684,7 +685,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
SRC_TOP11, 12, 1),
MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
- MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1),
+ MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p,
+ SRC_TOP11, 28, 1),
MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
SRC_TOP12, 4, 1),
@@ -890,8 +892,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
- GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
GATE_BUS_TOP, 13, 0, 0),
GATE(0, "aclk166", "mout_user_aclk166",
@@ -994,34 +994,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
/* PERIC Block */
- GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0),
- GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0),
- GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0),
- GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0),
- GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0),
- GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0),
- GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0),
- GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0),
- GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0),
- GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0),
- GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0),
- GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0),
- GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0),
- GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0),
- GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0),
- GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0),
- GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0),
- GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0),
- GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0),
- GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0),
- GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0),
- GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0),
- GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0),
- GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0),
- GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0),
- GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0),
-
- GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+ GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 0, 0, 0),
+ GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 1, 0, 0),
+ GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 2, 0, 0),
+ GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 3, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 6, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 7, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 8, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 9, 0, 0),
+ GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 10, 0, 0),
+ GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 11, 0, 0),
+ GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 12, 0, 0),
+ GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 14, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 15, 0, 0),
+ GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 16, 0, 0),
+ GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 17, 0, 0),
+ GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 18, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 20, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 21, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 22, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 23, 0, 0),
+ GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 24, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 26, 0, 0),
+ GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 28, 0, 0),
+ GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 30, 0, 0),
+ GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric",
+ GATE_IP_PERIC, 31, 0, 0),
+
+ GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric",
+ GATE_BUS_PERIC, 22, 0, 0),
/* PERIS Block */
GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index ba0716801db2..140f4733c02e 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
ALIAS(HCLK, NULL, "hclk"),
ALIAS(MPLL, NULL, "mpll"),
ALIAS(FCLK, NULL, "fclk"),
+ ALIAS(PCLK, NULL, "watchdog"),
+ ALIAS(PCLK_SDI, NULL, "sdi"),
+ ALIAS(HCLK_NAND, NULL, "nand"),
+ ALIAS(PCLK_I2S, NULL, "iis"),
+ ALIAS(PCLK_I2C, NULL, "i2c"),
};
/* S3C2410 specific clocks */
@@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
if (!np)
s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
- if (current_soc == 2410) {
+ if (current_soc == S3C2410) {
if (_get_rate("xti") == 12 * MHZ) {
s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
@@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
ARRAY_SIZE(s3c2410_ffactor));
samsung_clk_register_alias(ctx, s3c2410_aliases,
- ARRAY_SIZE(s3c2410_common_aliases));
+ ARRAY_SIZE(s3c2410_aliases));
break;
case S3C2440:
samsung_clk_register_mux(ctx, s3c2440_muxes,
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index efa16ee592c8..8889ff1c10fc 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
- ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
- ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+ ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"),
+ ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"),
+ ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"),
+ ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"),
ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index c2d204315546..bb5f387774e2 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
/* array of all spear 320 clock lookups */
#ifdef CONFIG_MACH_SPEAR320
-#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000)
+#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010)
#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018)
#define SPEAR320_UARTX_PCLK_MASK 0x1
@@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
"ras_syn0_gclk", };
static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
-static void __init spear320_clk_init(void __iomem *soc_config_base)
+static void __init spear320_clk_init(void __iomem *soc_config_base,
+ struct clk *ras_apb_clk)
{
struct clk *clk;
@@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
0, &_lock);
clk_register_clkdev(clk, NULL, "a3000000.serial");
+ /* Enforce ras_apb_clk */
+ clk_set_parent(clk, ras_apb_clk);
clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
ARRAY_SIZE(uartx_parents),
@@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a4000000.serial");
+ /* Enforce ras_apb_clk */
+ clk_set_parent(clk, ras_apb_clk);
clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
ARRAY_SIZE(uartx_parents),
@@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, NULL, "60100000.serial");
}
#else
-static inline void spear320_clk_init(void __iomem *soc_config_base) { }
+static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { }
#endif
void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
{
- struct clk *clk, *clk1;
+ struct clk *clk, *clk1, *ras_apb_clk;
clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
32000);
@@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
RAS_APB_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, "ras_apb_clk", NULL);
+ ras_apb_clk = clk;
clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
@@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
else if (of_machine_is_compatible("st,spear310"))
spear310_clk_init();
else if (of_machine_is_compatible("st,spear320"))
- spear320_clk_init(soc_config_base);
+ spear320_clk_init(soc_config_base, ras_apb_clk);
}
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index b5bac917612c..762fd64dbd1f 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -3,3 +3,7 @@
#
obj-y += clk-sunxi.o clk-factors.o
+obj-y += clk-a10-hosc.o
+obj-y += clk-a20-gmac.o
+
+obj-$(CONFIG_MFD_SUN6I_PRCM) += clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o
diff --git a/drivers/clk/sunxi/clk-a10-hosc.c b/drivers/clk/sunxi/clk-a10-hosc.c
new file mode 100644
index 000000000000..0481d5d673d6
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a10-hosc.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2013 Emilio López
+ *
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define SUNXI_OSC24M_GATE 0
+
+static DEFINE_SPINLOCK(hosc_lock);
+
+static void __init sun4i_osc_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_fixed_rate *fixed;
+ struct clk_gate *gate;
+ const char *clk_name = node->name;
+ u32 rate;
+
+ if (of_property_read_u32(node, "clock-frequency", &rate))
+ return;
+
+ /* allocate fixed-rate and gate clock structs */
+ fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
+ if (!fixed)
+ return;
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ goto err_free_fixed;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ /* set up gate and fixed rate properties */
+ gate->reg = of_iomap(node, 0);
+ gate->bit_idx = SUNXI_OSC24M_GATE;
+ gate->lock = &hosc_lock;
+ fixed->fixed_rate = rate;
+
+ clk = clk_register_composite(NULL, clk_name,
+ NULL, 0,
+ NULL, NULL,
+ &fixed->hw, &clk_fixed_rate_ops,
+ &gate->hw, &clk_gate_ops,
+ CLK_IS_ROOT);
+
+ if (IS_ERR(clk))
+ goto err_free_gate;
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+
+ return;
+
+err_free_gate:
+ kfree(gate);
+err_free_fixed:
+ kfree(fixed);
+}
+CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup);
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
new file mode 100644
index 000000000000..633ddc4389ef
--- /dev/null
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2013 Emilio López
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * Copyright 2013 Chen-Yu Tsai
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+static DEFINE_SPINLOCK(gmac_lock);
+
+/**
+ * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
+ *
+ * This clock looks something like this
+ * ________________________
+ * MII TX clock from PHY >-----|___________ _________|----> to GMAC core
+ * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY
+ * Ext. 125MHz RGMII TX clk >--|__divider__/ |
+ * |________________________|
+ *
+ * The external 125 MHz reference is optional, i.e. GMAC can use its
+ * internal TX clock just fine. The A31 GMAC clock module does not have
+ * the divider controls for the external reference.
+ *
+ * To keep it simple, let the GMAC use the MII TX clock for MII mode,
+ * and its internal TX clock for GMII and RGMII modes. The GMAC driver should
+ * select the appropriate source and gate/ungate the output to the PHY.
+ *
+ * Only the GMAC should use this clock. Altering the clock so that it doesn't
+ * match the GMAC's operation parameters will result in the GMAC not being
+ * able to send traffic out. The GMAC driver should set the clock rate and
+ * enable/disable this clock to configure the required state. The clock
+ * driver then responds by auto-reparenting the clock.
+ */
+
+#define SUN7I_A20_GMAC_GPIT 2
+#define SUN7I_A20_GMAC_MASK 0x3
+#define SUN7I_A20_GMAC_PARENTS 2
+
+static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_mux *mux;
+ struct clk_gate *gate;
+ const char *clk_name = node->name;
+ const char *parents[SUN7I_A20_GMAC_PARENTS];
+ void __iomem *reg;
+
+ if (of_property_read_string(node, "clock-output-names", &clk_name))
+ return;
+
+ /* allocate mux and gate clock structs */
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ goto free_mux;
+
+ /* gmac clock requires exactly 2 parents */
+ parents[0] = of_clk_get_parent_name(node, 0);
+ parents[1] = of_clk_get_parent_name(node, 1);
+ if (!parents[0] || !parents[1])
+ goto free_gate;
+
+ reg = of_iomap(node, 0);
+ if (!reg)
+ goto free_gate;
+
+ /* set up gate and fixed rate properties */
+ gate->reg = reg;
+ gate->bit_idx = SUN7I_A20_GMAC_GPIT;
+ gate->lock = &gmac_lock;
+ mux->reg = reg;
+ mux->mask = SUN7I_A20_GMAC_MASK;
+ mux->flags = CLK_MUX_INDEX_BIT;
+ mux->lock = &gmac_lock;
+
+ clk = clk_register_composite(NULL, clk_name,
+ parents, SUN7I_A20_GMAC_PARENTS,
+ &mux->hw, &clk_mux_ops,
+ NULL, NULL,
+ &gate->hw, &clk_gate_ops,
+ 0);
+
+ if (IS_ERR(clk))
+ goto iounmap_reg;
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+
+ return;
+
+iounmap_reg:
+ iounmap(reg);
+free_gate:
+ kfree(gate);
+free_mux:
+ kfree(mux);
+}
+CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk",
+ sun7i_a20_gmac_clk_setup);
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
new file mode 100644
index 000000000000..670f90d629d7
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * Allwinner A31 APB0 clock gates driver
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define SUN6I_APB0_GATES_MAX_SIZE 32
+
+static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_onecell_data *clk_data;
+ const char *clk_parent;
+ const char *clk_name;
+ struct resource *r;
+ void __iomem *reg;
+ int gate_id;
+ int ngates;
+ int i;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ clk_parent = of_clk_get_parent_name(np, 0);
+ if (!clk_parent)
+ return -EINVAL;
+
+ ngates = of_property_count_strings(np, "clock-output-names");
+ if (ngates < 0)
+ return ngates;
+
+ if (!ngates || ngates > SUN6I_APB0_GATES_MAX_SIZE)
+ return -EINVAL;
+
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->clks = devm_kzalloc(&pdev->dev,
+ SUN6I_APB0_GATES_MAX_SIZE *
+ sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!clk_data->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ngates; i++) {
+ of_property_read_string_index(np, "clock-output-names",
+ i, &clk_name);
+
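+ /* "clock-indices" may remap entry i to a sparse gate bit */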
+ gate_id = i;
+ of_property_read_u32_index(np, "clock-indices", i, &gate_id);
+
+ WARN_ON(gate_id >= SUN6I_APB0_GATES_MAX_SIZE);
+ if (gate_id >= SUN6I_APB0_GATES_MAX_SIZE)
+ continue;
+
+ clk_data->clks[gate_id] = clk_register_gate(&pdev->dev,
+ clk_name,
+ clk_parent, 0,
+ reg, gate_id,
+ 0, NULL);
+ WARN_ON(IS_ERR(clk_data->clks[gate_id]));
+ }
+
+ clk_data->clk_num = ngates;
+
+ return of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+}
+
+const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-apb0-gates-clk" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun6i_a31_apb0_gates_clk_driver = {
+ .driver = {
+ .name = "sun6i-a31-apb0-gates-clk",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_a31_apb0_gates_clk_dt_ids,
+ },
+ .probe = sun6i_a31_apb0_gates_clk_probe,
+};
+module_platform_driver(sun6i_a31_apb0_gates_clk_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 APB0 gate clocks driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
new file mode 100644
index 000000000000..11f17c34c2ae
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * Allwinner A31 APB0 clock driver
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/*
+ * The APB0 clk has a configurable divisor.
+ *
+ * We must use a clk_div_table and not a regular power of 2
+ * divisor here, because the first 2 values divide the clock
+ * by 2.
+ */
+static const struct clk_div_table sun6i_a31_apb0_divs[] = {
+ { .val = 0, .div = 2, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 4, },
+ { .val = 3, .div = 8, },
+ { /* sentinel */ },
+};
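+
+/*
+ * For example, assuming a hypothetical 24 MHz parent: register values
+ * 0 and 1 both yield 12 MHz, value 2 yields 6 MHz and value 3 yields
+ * 3 MHz, which a plain power-of-two divider could not describe.
+ */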
+
+static int sun6i_a31_apb0_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const char *clk_name = np->name;
+ const char *clk_parent;
+ struct resource *r;
+ void __iomem *reg;
+ struct clk *clk;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ clk_parent = of_clk_get_parent_name(np, 0);
+ if (!clk_parent)
+ return -EINVAL;
+
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ clk = clk_register_divider_table(&pdev->dev, clk_name, clk_parent,
+ 0, reg, 0, 2, 0, sun6i_a31_apb0_divs,
+ NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-apb0-clk" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun6i_a31_apb0_clk_driver = {
+ .driver = {
+ .name = "sun6i-a31-apb0-clk",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_a31_apb0_clk_dt_ids,
+ },
+ .probe = sun6i_a31_apb0_clk_probe,
+};
+module_platform_driver(sun6i_a31_apb0_clk_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 APB0 clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
new file mode 100644
index 000000000000..f73cc051f0dd
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * Allwinner A31 AR100 clock driver
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define SUN6I_AR100_MAX_PARENTS 4
+#define SUN6I_AR100_SHIFT_MASK 0x3
+#define SUN6I_AR100_SHIFT_MAX SUN6I_AR100_SHIFT_MASK
+#define SUN6I_AR100_SHIFT_SHIFT 4
+#define SUN6I_AR100_DIV_MASK 0x1f
+#define SUN6I_AR100_DIV_MAX (SUN6I_AR100_DIV_MASK + 1)
+#define SUN6I_AR100_DIV_SHIFT 8
+#define SUN6I_AR100_MUX_MASK 0x3
+#define SUN6I_AR100_MUX_SHIFT 16
+
+struct ar100_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+
+static inline struct ar100_clk *to_ar100_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct ar100_clk, hw);
+}
+
+static unsigned long ar100_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ar100_clk *clk = to_ar100_clk(hw);
+ u32 val = readl(clk->reg);
+ int shift = (val >> SUN6I_AR100_SHIFT_SHIFT) & SUN6I_AR100_SHIFT_MASK;
+ int div = (val >> SUN6I_AR100_DIV_SHIFT) & SUN6I_AR100_DIV_MASK;
+
+ return (parent_rate >> shift) / (div + 1);
+}
+
+static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_clk)
+{
+ int nparents = __clk_get_num_parents(hw->clk);
+ long best_rate = -EINVAL;
+ int i;
+
+ *best_parent_clk = NULL;
+
+ for (i = 0; i < nparents; i++) {
+ unsigned long parent_rate;
+ unsigned long tmp_rate;
+ struct clk *parent;
+ unsigned long div;
+ int shift;
+
+ parent = clk_get_parent_by_index(hw->clk, i);
+ parent_rate = __clk_get_rate(parent);
+ div = DIV_ROUND_UP(parent_rate, rate);
+
+ /*
+ * The AR100 clk contains 2 divisors:
+ * - one power of 2 divisor
+ * - one regular divisor
+ *
+ * First check if we can safely shift (or divide by a power
+ * of 2) without losing precision on the requested rate.
+ */
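+		/*
+		 * Worked example with hypothetical rates: for a 600 MHz
+		 * parent and a 50 MHz request, div = 12; ffs(12) - 1 = 2,
+		 * so the power of 2 divisor absorbs a factor of 4 and the
+		 * regular divisor becomes 3, giving
+		 * (600 MHz >> 2) / 3 = 50 MHz with no precision loss.
+		 */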
+ shift = ffs(div) - 1;
+ if (shift > SUN6I_AR100_SHIFT_MAX)
+ shift = SUN6I_AR100_SHIFT_MAX;
+
+ div >>= shift;
+
+ /*
+ * Then if the divisor is still bigger than what the HW
+ * actually supports, use a bigger shift (or power of 2
+		 * divider) value and accept losing some precision.
+ */
+ while (div > SUN6I_AR100_DIV_MAX) {
+ shift++;
+ div >>= 1;
+ if (shift > SUN6I_AR100_SHIFT_MAX)
+ break;
+ }
+
+ /*
+ * If the shift value (or power of 2 divider) is bigger
+		 * than what the HW actually supports, skip this parent.
+ */
+ if (shift > SUN6I_AR100_SHIFT_MAX)
+ continue;
+
+ tmp_rate = (parent_rate >> shift) / div;
+ if (!*best_parent_clk || tmp_rate > best_rate) {
+ *best_parent_clk = parent;
+ *best_parent_rate = parent_rate;
+ best_rate = tmp_rate;
+ }
+ }
+
+ return best_rate;
+}
+
+static int ar100_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ar100_clk *clk = to_ar100_clk(hw);
+ u32 val = readl(clk->reg);
+
+ if (index >= SUN6I_AR100_MAX_PARENTS)
+ return -EINVAL;
+
+ val &= ~(SUN6I_AR100_MUX_MASK << SUN6I_AR100_MUX_SHIFT);
+ val |= (index << SUN6I_AR100_MUX_SHIFT);
+ writel(val, clk->reg);
+
+ return 0;
+}
+
+static u8 ar100_get_parent(struct clk_hw *hw)
+{
+ struct ar100_clk *clk = to_ar100_clk(hw);
+ return (readl(clk->reg) >> SUN6I_AR100_MUX_SHIFT) &
+ SUN6I_AR100_MUX_MASK;
+}
+
+static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned long div = parent_rate / rate;
+ struct ar100_clk *clk = to_ar100_clk(hw);
+ u32 val = readl(clk->reg);
+ int shift;
+
+ if (parent_rate % rate)
+ return -EINVAL;
+
+ shift = ffs(div) - 1;
+ if (shift > SUN6I_AR100_SHIFT_MAX)
+ shift = SUN6I_AR100_SHIFT_MAX;
+
+ div >>= shift;
+
+ if (div > SUN6I_AR100_DIV_MAX)
+ return -EINVAL;
+
+ val &= ~((SUN6I_AR100_SHIFT_MASK << SUN6I_AR100_SHIFT_SHIFT) |
+ (SUN6I_AR100_DIV_MASK << SUN6I_AR100_DIV_SHIFT));
+ val |= (shift << SUN6I_AR100_SHIFT_SHIFT) |
+ (div << SUN6I_AR100_DIV_SHIFT);
+ writel(val, clk->reg);
+
+ return 0;
+}
+
+static const struct clk_ops ar100_ops = {
+ .recalc_rate = ar100_recalc_rate,
+ .determine_rate = ar100_determine_rate,
+ .set_parent = ar100_set_parent,
+ .get_parent = ar100_get_parent,
+ .set_rate = ar100_set_rate,
+};
+
+static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
+{
+ const char *parents[SUN6I_AR100_MAX_PARENTS];
+ struct device_node *np = pdev->dev.of_node;
+ const char *clk_name = np->name;
+ struct clk_init_data init;
+ struct ar100_clk *ar100;
+ struct resource *r;
+ struct clk *clk;
+ int nparents;
+ int i;
+
+ ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
+ if (!ar100)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ar100->reg = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ar100->reg))
+ return PTR_ERR(ar100->reg);
+
+ nparents = of_clk_get_parent_count(np);
+ if (nparents > SUN6I_AR100_MAX_PARENTS)
+ nparents = SUN6I_AR100_MAX_PARENTS;
+
+ for (i = 0; i < nparents; i++)
+ parents[i] = of_clk_get_parent_name(np, i);
+
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = &ar100_ops;
+ init.parent_names = parents;
+ init.num_parents = nparents;
+ init.flags = 0;
+
+ ar100->hw.init = &init;
+
+ clk = clk_register(&pdev->dev, &ar100->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-ar100-clk" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun6i_a31_ar100_clk_driver = {
+ .driver = {
+ .name = "sun6i-a31-ar100-clk",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_a31_ar100_clk_dt_ids,
+ },
+ .probe = sun6i_a31_ar100_clk_probe,
+};
+module_platform_driver(sun6i_a31_ar100_clk_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 AR100 clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 426483422d3d..fb2ce8440f0e 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -28,63 +28,6 @@ static DEFINE_SPINLOCK(clk_lock);
#define SUNXI_MAX_PARENTS 5
/**
- * sun4i_osc_clk_setup() - Setup function for gatable oscillator
- */
-
-#define SUNXI_OSC24M_GATE 0
-
-static void __init sun4i_osc_clk_setup(struct device_node *node)
-{
- struct clk *clk;
- struct clk_fixed_rate *fixed;
- struct clk_gate *gate;
- const char *clk_name = node->name;
- u32 rate;
-
- if (of_property_read_u32(node, "clock-frequency", &rate))
- return;
-
- /* allocate fixed-rate and gate clock structs */
- fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
- if (!fixed)
- return;
- gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
- if (!gate)
- goto err_free_fixed;
-
- of_property_read_string(node, "clock-output-names", &clk_name);
-
- /* set up gate and fixed rate properties */
- gate->reg = of_iomap(node, 0);
- gate->bit_idx = SUNXI_OSC24M_GATE;
- gate->lock = &clk_lock;
- fixed->fixed_rate = rate;
-
- clk = clk_register_composite(NULL, clk_name,
- NULL, 0,
- NULL, NULL,
- &fixed->hw, &clk_fixed_rate_ops,
- &gate->hw, &clk_gate_ops,
- CLK_IS_ROOT);
-
- if (IS_ERR(clk))
- goto err_free_gate;
-
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
-
- return;
-
-err_free_gate:
- kfree(gate);
-err_free_fixed:
- kfree(fixed);
-}
-CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup);
-
-
-
-/**
* sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
@@ -408,104 +351,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
*p = calcp;
}
-
-
-/**
- * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module
- *
- * This clock looks something like this
- * ________________________
- * MII TX clock from PHY >-----|___________ _________|----> to GMAC core
- * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY
- * Ext. 125MHz RGMII TX clk >--|__divider__/ |
- * |________________________|
- *
- * The external 125 MHz reference is optional, i.e. GMAC can use its
- * internal TX clock just fine. The A31 GMAC clock module does not have
- * the divider controls for the external reference.
- *
- * To keep it simple, let the GMAC use either the MII TX clock for MII mode,
- * and its internal TX clock for GMII and RGMII modes. The GMAC driver should
- * select the appropriate source and gate/ungate the output to the PHY.
- *
- * Only the GMAC should use this clock. Altering the clock so that it doesn't
- * match the GMAC's operation parameters will result in the GMAC not being
- * able to send traffic out. The GMAC driver should set the clock rate and
- * enable/disable this clock to configure the required state. The clock
- * driver then responds by auto-reparenting the clock.
- */
-
-#define SUN7I_A20_GMAC_GPIT 2
-#define SUN7I_A20_GMAC_MASK 0x3
-#define SUN7I_A20_GMAC_PARENTS 2
-
-static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
-{
- struct clk *clk;
- struct clk_mux *mux;
- struct clk_gate *gate;
- const char *clk_name = node->name;
- const char *parents[SUN7I_A20_GMAC_PARENTS];
- void *reg;
-
- if (of_property_read_string(node, "clock-output-names", &clk_name))
- return;
-
- /* allocate mux and gate clock structs */
- mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux)
- return;
-
- gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
- if (!gate)
- goto free_mux;
-
- /* gmac clock requires exactly 2 parents */
- parents[0] = of_clk_get_parent_name(node, 0);
- parents[1] = of_clk_get_parent_name(node, 1);
- if (!parents[0] || !parents[1])
- goto free_gate;
-
- reg = of_iomap(node, 0);
- if (!reg)
- goto free_gate;
-
- /* set up gate and fixed rate properties */
- gate->reg = reg;
- gate->bit_idx = SUN7I_A20_GMAC_GPIT;
- gate->lock = &clk_lock;
- mux->reg = reg;
- mux->mask = SUN7I_A20_GMAC_MASK;
- mux->flags = CLK_MUX_INDEX_BIT;
- mux->lock = &clk_lock;
-
- clk = clk_register_composite(NULL, clk_name,
- parents, SUN7I_A20_GMAC_PARENTS,
- &mux->hw, &clk_mux_ops,
- NULL, NULL,
- &gate->hw, &clk_gate_ops,
- 0);
-
- if (IS_ERR(clk))
- goto iounmap_reg;
-
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
-
- return;
-
-iounmap_reg:
- iounmap(reg);
-free_gate:
- kfree(gate);
-free_mux:
- kfree(mux);
-}
-CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk",
- sun7i_a20_gmac_clk_setup);
-
-
-
/**
* clk_sunxi_mmc_phase_control() - configures MMC clock phase control
*/
@@ -1009,6 +854,11 @@ static const struct gates_data sun5i_a13_usb_gates_data __initconst = {
.reset_mask = 0x03,
};
+static const struct gates_data sun6i_a31_usb_gates_data __initconst = {
+ .mask = { BIT(18) | BIT(17) | BIT(16) | BIT(10) | BIT(9) | BIT(8) },
+ .reset_mask = BIT(2) | BIT(1) | BIT(0),
+};
+
static void __init sunxi_gates_clk_setup(struct device_node *node,
struct gates_data *data)
{
@@ -1304,6 +1154,7 @@ static const struct of_device_id clk_gates_match[] __initconst = {
{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
{.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
{.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-usb-clk", .data = &sun6i_a31_usb_gates_data,},
{}
};
@@ -1321,33 +1172,10 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
}
}
-/**
- * System clock protection
- *
- * By enabling these critical clocks, we prevent their accidental gating
- * by the framework
- */
-static void __init sunxi_clock_protect(void)
+static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
{
- struct clk *clk;
-
- /* memory bus clock - sun5i+ */
- clk = clk_get(NULL, "mbus");
- if (!IS_ERR(clk)) {
- clk_prepare_enable(clk);
- clk_put(clk);
- }
-
- /* DDR clock - sun4i+ */
- clk = clk_get(NULL, "pll5_ddr");
- if (!IS_ERR(clk)) {
- clk_prepare_enable(clk);
- clk_put(clk);
- }
-}
+ unsigned int i;
-static void __init sunxi_init_clocks(struct device_node *np)
-{
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
@@ -1363,11 +1191,48 @@ static void __init sunxi_init_clocks(struct device_node *np)
/* Register gate clocks */
of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
- /* Enable core system clocks */
- sunxi_clock_protect();
+	/* Protect the clocks that need to stay on */
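+	/*
+	 * clk_prepare_enable() leaves the prepare/enable counts raised,
+	 * so the framework will not gate these clocks as unused.
+	 */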
+ for (i = 0; i < nclocks; i++) {
+ struct clk *clk = clk_get(NULL, clocks[i]);
+
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+ }
+}
+
+static const char *sun4i_a10_critical_clocks[] __initdata = {
+ "pll5_ddr",
+};
+
+static void __init sun4i_a10_init_clocks(struct device_node *node)
+{
+ sunxi_init_clocks(sun4i_a10_critical_clocks,
+ ARRAY_SIZE(sun4i_a10_critical_clocks));
+}
+CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
+
+static const char *sun5i_critical_clocks[] __initdata = {
+ "mbus",
+ "pll5_ddr",
+};
+
+static void __init sun5i_init_clocks(struct device_node *node)
+{
+ sunxi_init_clocks(sun5i_critical_clocks,
+ ARRAY_SIZE(sun5i_critical_clocks));
+}
+CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sun5i_init_clocks);
+CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sun5i_init_clocks);
+CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
+
+static const char *sun6i_critical_clocks[] __initdata = {
+ "cpu",
+ "ahb1_sdram",
+};
+
+static void __init sun6i_init_clocks(struct device_node *node)
+{
+ sunxi_init_clocks(sun6i_critical_clocks,
+ ARRAY_SIZE(sun6i_critical_clocks));
}
-CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sunxi_init_clocks);
-CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sunxi_init_clocks);
-CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sunxi_init_clocks);
-CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sunxi_init_clocks);
-CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sunxi_init_clocks);
+CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index 4319d4031aa3..ed4d0aaf8916 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -3,9 +3,11 @@ obj-y += clk.o autoidle.o clockdomain.o
clk-common = dpll.o composite.o divider.o gate.o \
fixed-factor.o mux.o apll.o
obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o
+obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o
obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o
obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o
-obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o
+obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
+ clk-dra7-atl.o
obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o
endif
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index b986f61f5a77..72d97279eae1 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw)
if (i == MAX_APLL_WAIT_TRIES) {
pr_warn("clock: %s failed transition to '%s'\n",
clk_name, (state) ? "locked" : "bypassed");
- } else {
+ r = -EBUSY;
+ } else
pr_debug("clock: %s transition to '%s' in %d loops\n",
clk_name, (state) ? "locked" : "bypassed", i);
- r = 0;
- }
-
return r;
}
@@ -221,3 +219,184 @@ cleanup:
kfree(init);
}
CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
+
+#define OMAP2_EN_APLL_LOCKED 0x3
+#define OMAP2_EN_APLL_STOPPED 0x0
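+
+/*
+ * The 2-bit enable field lives at "ti,bit-shift" within the control
+ * register: e.g. with a hypothetical bit-shift of 2, enable_mask is
+ * 0x3 << 2 = 0xc, and omap2_apll_is_enabled() masks the field and
+ * shifts it down before comparing against the values above.
+ */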
+
+static int omap2_apll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ad->enable_mask;
+
+ v >>= __ffs(ad->enable_mask);
+
+ return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
+}
+
+static unsigned long omap2_apll_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+ if (omap2_apll_is_enabled(hw))
+ return clk->fixed_rate;
+
+ return 0;
+}
+
+static int omap2_apll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+ int i = 0;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+
+ while (1) {
+ v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+ if (v & ad->idlest_mask)
+ break;
+ if (i > MAX_APLL_WAIT_TRIES)
+ break;
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_APLL_WAIT_TRIES) {
+ pr_warn("%s failed to transition to locked\n",
+ __clk_get_name(clk->hw.clk));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void omap2_apll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+}
+
+static struct clk_ops omap2_apll_ops = {
+ .enable = &omap2_apll_enable,
+ .disable = &omap2_apll_disable,
+ .is_enabled = &omap2_apll_is_enabled,
+ .recalc_rate = &omap2_apll_recalc,
+};
+
+static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
+{
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(ad->autoidle_reg);
+ v &= ~ad->autoidle_mask;
+ v |= val << __ffs(ad->autoidle_mask);
+	ti_clk_ll_ops->clk_writel(v, ad->autoidle_reg);
+}
+
+#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3
+#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0
+
+static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
+{
+ omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
+}
+
+static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
+{
+ omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
+}
+
+static struct clk_hw_omap_ops omap2_apll_hwops = {
+ .allow_idle = &omap2_apll_allow_idle,
+ .deny_idle = &omap2_apll_deny_idle,
+};
+
+static void __init of_omap2_apll_setup(struct device_node *node)
+{
+ struct dpll_data *ad = NULL;
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ struct clk *clk;
+ const char *parent_name;
+ u32 val;
+
+ ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+
+ if (!ad || !clk_hw || !init)
+ goto cleanup;
+
+ clk_hw->dpll_data = ad;
+ clk_hw->hw.init = init;
+ init->ops = &omap2_apll_ops;
+ init->name = node->name;
+ clk_hw->ops = &omap2_apll_hwops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents != 1) {
+ pr_err("%s must have one parent\n", node->name);
+ goto cleanup;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ init->parent_names = &parent_name;
+
+ if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
+ pr_err("%s missing clock-frequency\n", node->name);
+ goto cleanup;
+ }
+ clk_hw->fixed_rate = val;
+
+ if (of_property_read_u32(node, "ti,bit-shift", &val)) {
+ pr_err("%s missing bit-shift\n", node->name);
+ goto cleanup;
+ }
+
+ clk_hw->enable_bit = val;
+ ad->enable_mask = 0x3 << val;
+ ad->autoidle_mask = 0x3 << val;
+
+ if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
+ pr_err("%s missing idlest-shift\n", node->name);
+ goto cleanup;
+ }
+
+ ad->idlest_mask = 1 << val;
+
+ ad->control_reg = ti_clk_get_reg_addr(node, 0);
+ ad->autoidle_reg = ti_clk_get_reg_addr(node, 1);
+ ad->idlest_reg = ti_clk_get_reg_addr(node, 2);
+
+ if (!ad->control_reg || !ad->autoidle_reg || !ad->idlest_reg)
+ goto cleanup;
+
+ clk = clk_register(NULL, &clk_hw->hw);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(init);
+ return;
+ }
+cleanup:
+ kfree(ad);
+ kfree(clk_hw);
+ kfree(init);
+}
+CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
+ of_omap2_apll_setup);
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
new file mode 100644
index 000000000000..c808ab3d2bb2
--- /dev/null
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -0,0 +1,256 @@
+/*
+ * OMAP2 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk omap2xxx_clks[] = {
+ DT_CLK(NULL, "func_32k_ck", "func_32k_ck"),
+ DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"),
+ DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
+ DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26m_ck", "virt_26m_ck"),
+ DT_CLK(NULL, "aplls_clkin_ck", "aplls_clkin_ck"),
+ DT_CLK(NULL, "aplls_clkin_x2_ck", "aplls_clkin_x2_ck"),
+ DT_CLK(NULL, "osc_ck", "osc_ck"),
+ DT_CLK(NULL, "sys_ck", "sys_ck"),
+ DT_CLK(NULL, "alt_ck", "alt_ck"),
+ DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
+ DT_CLK(NULL, "dpll_ck", "dpll_ck"),
+ DT_CLK(NULL, "apll96_ck", "apll96_ck"),
+ DT_CLK(NULL, "apll54_ck", "apll54_ck"),
+ DT_CLK(NULL, "func_54m_ck", "func_54m_ck"),
+ DT_CLK(NULL, "core_ck", "core_ck"),
+ DT_CLK(NULL, "func_96m_ck", "func_96m_ck"),
+ DT_CLK(NULL, "func_48m_ck", "func_48m_ck"),
+ DT_CLK(NULL, "func_12m_ck", "func_12m_ck"),
+ DT_CLK(NULL, "sys_clkout_src", "sys_clkout_src"),
+ DT_CLK(NULL, "sys_clkout", "sys_clkout"),
+ DT_CLK(NULL, "emul_ck", "emul_ck"),
+ DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+ DT_CLK(NULL, "dsp_fck", "dsp_fck"),
+ DT_CLK(NULL, "gfx_3d_fck", "gfx_3d_fck"),
+ DT_CLK(NULL, "gfx_2d_fck", "gfx_2d_fck"),
+ DT_CLK(NULL, "gfx_ick", "gfx_ick"),
+ DT_CLK("omapdss_dss", "ick", "dss_ick"),
+ DT_CLK(NULL, "dss_ick", "dss_ick"),
+ DT_CLK(NULL, "dss1_fck", "dss1_fck"),
+ DT_CLK(NULL, "dss2_fck", "dss2_fck"),
+ DT_CLK(NULL, "dss_54m_fck", "dss_54m_fck"),
+ DT_CLK(NULL, "core_l3_ck", "core_l3_ck"),
+ DT_CLK(NULL, "ssi_fck", "ssi_ssr_sst_fck"),
+ DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
+ DT_CLK(NULL, "l4_ck", "l4_ck"),
+ DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
+ DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
+ DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
+ DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
+ DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
+ DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
+ DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
+ DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
+ DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
+ DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
+ DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
+ DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
+ DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
+ DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
+ DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
+ DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
+ DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
+ DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
+ DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
+ DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
+ DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
+ DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
+ DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
+ DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
+ DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
+ DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
+ DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
+ DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
+ DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
+ DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
+ DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
+ DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
+ DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
+ DT_CLK(NULL, "uart1_ick", "uart1_ick"),
+ DT_CLK(NULL, "uart1_fck", "uart1_fck"),
+ DT_CLK(NULL, "uart2_ick", "uart2_ick"),
+ DT_CLK(NULL, "uart2_fck", "uart2_fck"),
+ DT_CLK(NULL, "uart3_ick", "uart3_ick"),
+ DT_CLK(NULL, "uart3_fck", "uart3_fck"),
+ DT_CLK(NULL, "gpios_ick", "gpios_ick"),
+ DT_CLK(NULL, "gpios_fck", "gpios_fck"),
+ DT_CLK("omap_wdt", "ick", "mpu_wdt_ick"),
+ DT_CLK(NULL, "mpu_wdt_ick", "mpu_wdt_ick"),
+ DT_CLK(NULL, "mpu_wdt_fck", "mpu_wdt_fck"),
+ DT_CLK(NULL, "sync_32k_ick", "sync_32k_ick"),
+ DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
+ DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
+ DT_CLK("omap24xxcam", "fck", "cam_fck"),
+ DT_CLK(NULL, "cam_fck", "cam_fck"),
+ DT_CLK("omap24xxcam", "ick", "cam_ick"),
+ DT_CLK(NULL, "cam_ick", "cam_ick"),
+ DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
+ DT_CLK(NULL, "wdt4_ick", "wdt4_ick"),
+ DT_CLK(NULL, "wdt4_fck", "wdt4_fck"),
+ DT_CLK(NULL, "mspro_ick", "mspro_ick"),
+ DT_CLK(NULL, "mspro_fck", "mspro_fck"),
+ DT_CLK(NULL, "fac_ick", "fac_ick"),
+ DT_CLK(NULL, "fac_fck", "fac_fck"),
+ DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
+ DT_CLK(NULL, "hdq_ick", "hdq_ick"),
+ DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
+ DT_CLK(NULL, "hdq_fck", "hdq_fck"),
+ DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
+ DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
+ DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
+ DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
+ DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
+ DT_CLK(NULL, "sdma_fck", "sdma_fck"),
+ DT_CLK(NULL, "sdma_ick", "sdma_ick"),
+ DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
+ DT_CLK(NULL, "des_ick", "des_ick"),
+ DT_CLK("omap-sham", "ick", "sha_ick"),
+ DT_CLK(NULL, "sha_ick", "sha_ick"),
+ DT_CLK("omap_rng", "ick", "rng_ick"),
+ DT_CLK(NULL, "rng_ick", "rng_ick"),
+ DT_CLK("omap-aes", "ick", "aes_ick"),
+ DT_CLK(NULL, "aes_ick", "aes_ick"),
+ DT_CLK(NULL, "pka_ick", "pka_ick"),
+ DT_CLK(NULL, "usb_fck", "usb_fck"),
+ DT_CLK(NULL, "timer_32k_ck", "func_32k_ck"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
+ DT_CLK(NULL, "timer_ext_ck", "alt_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap2420_clks[] = {
+ DT_CLK(NULL, "sys_clkout2_src", "sys_clkout2_src"),
+ DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
+ DT_CLK(NULL, "dsp_ick", "dsp_ick"),
+ DT_CLK(NULL, "iva1_ifck", "iva1_ifck"),
+ DT_CLK(NULL, "iva1_mpu_int_ifck", "iva1_mpu_int_ifck"),
+ DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
+ DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
+ DT_CLK("mmci-omap.0", "ick", "mmc_ick"),
+ DT_CLK(NULL, "mmc_ick", "mmc_ick"),
+ DT_CLK("mmci-omap.0", "fck", "mmc_fck"),
+ DT_CLK(NULL, "mmc_fck", "mmc_fck"),
+ DT_CLK(NULL, "eac_ick", "eac_ick"),
+ DT_CLK(NULL, "eac_fck", "eac_fck"),
+ DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
+ DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
+ DT_CLK(NULL, "vlynq_ick", "vlynq_ick"),
+ DT_CLK(NULL, "vlynq_fck", "vlynq_fck"),
+ DT_CLK("musb-hdrc", "fck", "osc_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap2430_clks[] = {
+ DT_CLK("twl", "fck", "osc_ck"),
+ DT_CLK(NULL, "iva2_1_ick", "iva2_1_ick"),
+ DT_CLK(NULL, "mdm_ick", "mdm_ick"),
+ DT_CLK(NULL, "mdm_osc_ck", "mdm_osc_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
+ DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
+ DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
+ DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
+ DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
+ DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
+ DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
+ DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
+ DT_CLK(NULL, "icr_ick", "icr_ick"),
+ DT_CLK(NULL, "i2chs1_fck", "i2chs1_fck"),
+ DT_CLK(NULL, "i2chs2_fck", "i2chs2_fck"),
+ DT_CLK("musb-omap2430", "ick", "usbhs_ick"),
+ DT_CLK(NULL, "usbhs_ick", "usbhs_ick"),
+ DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
+ DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
+ DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
+ DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
+ DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
+ DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
+ DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
+ DT_CLK(NULL, "gpio5_fck", "gpio5_fck"),
+ DT_CLK(NULL, "mdm_intc_ick", "mdm_intc_ick"),
+ DT_CLK("omap_hsmmc.0", "mmchsdb_fck", "mmchsdb1_fck"),
+ DT_CLK(NULL, "mmchsdb1_fck", "mmchsdb1_fck"),
+ DT_CLK("omap_hsmmc.1", "mmchsdb_fck", "mmchsdb2_fck"),
+ DT_CLK(NULL, "mmchsdb2_fck", "mmchsdb2_fck"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "apll96_ck",
+ "apll54_ck",
+ "sync_32k_ick",
+ "omapctrl_ick",
+ "gpmc_fck",
+ "sdrc_ick",
+};
+
+enum {
+ OMAP2_SOC_OMAP2420,
+ OMAP2_SOC_OMAP2430,
+};
+
+static int __init omap2xxx_dt_clk_init(int soc_type)
+{
+ ti_dt_clocks_register(omap2xxx_clks);
+
+ if (soc_type == OMAP2_SOC_OMAP2420)
+ ti_dt_clocks_register(omap2420_clks);
+ else
+ ti_dt_clocks_register(omap2430_clks);
+
+ omap2xxx_clkt_vps_init();
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
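+	/*
+	 * Rates are reported in MHz with one decimal digit: e.g. a
+	 * hypothetical 13 MHz sys_ck prints as "13.0", since
+	 * 13000000 / 1000000 = 13 and (13000000 / 100000) % 10 = 0.
+	 */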
+ pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 100000) % 10,
+ (clk_get_rate(clk_get_sys(NULL, "dpll_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "mpu_ck")) / 1000000));
+
+ return 0;
+}
+
+int __init omap2420_dt_clk_init(void)
+{
+ return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2420);
+}
+
+int __init omap2430_dt_clk_init(void)
+{
+ return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2430);
+}
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 527a43da3d33..3795fce8a830 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -116,9 +116,25 @@ static struct ti_dt_clk am43xx_clks[] = {
int __init am43xx_dt_clk_init(void)
{
+ struct clk *clk1, *clk2;
+
ti_dt_clocks_register(am43xx_clks);
omap2_clk_disable_autoidle_all();
+ /*
+	 * cpsw_cpts_rft_clk can choose between 3 clocksources:
+	 * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
+	 * By default dpll_core_m4_ck is selected, but with this clock
+	 * source the CPTS does not work properly; it reports clockcheck
+	 * errors while running PTP:
+	 * clockcheck: clock jumped backward or running slower than expected!
+	 * Selecting dpll_core_m5_ck as the clocksource fixes this issue.
+	 * On AM335x dpll_core_m5_ck is the default clocksource.
+ */
+ clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
+ clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
+ clk_set_parent(clk1, clk2);
+
return 0;
}
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 08f3d1b915b3..5e183993e3ec 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -240,6 +240,12 @@ int __init omap5xxx_dt_clk_init(void)
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ * 2);
+ if (rc)
+ pr_err("%s: failed to configure ABE m2x2 DPLL!\n", __func__);
+
usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ);
if (rc)
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index f7e40734c819..e1581335937d 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -24,7 +24,7 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"),
- DT_CLK(NULL, "atlclkin3_ck", "atlclkin3_ck"),
+ DT_CLK(NULL, "atl_clkin3_ck", "atl_clkin3_ck"),
DT_CLK(NULL, "hdmi_clkin_ck", "hdmi_clkin_ck"),
DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"),
DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"),
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
new file mode 100644
index 000000000000..4a65b410e4d5
--- /dev/null
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -0,0 +1,312 @@
+/*
+ * DRA7 ATL (Audio Tracking Logic) clock driver
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define DRA7_ATL_INSTANCES 4
+
+#define DRA7_ATL_PPMR_REG(id) (0x200 + (id * 0x80))
+#define DRA7_ATL_BBSR_REG(id) (0x204 + (id * 0x80))
+#define DRA7_ATL_ATLCR_REG(id) (0x208 + (id * 0x80))
+#define DRA7_ATL_SWEN_REG(id) (0x210 + (id * 0x80))
+#define DRA7_ATL_BWSMUX_REG(id) (0x214 + (id * 0x80))
+#define DRA7_ATL_AWSMUX_REG(id) (0x218 + (id * 0x80))
+#define DRA7_ATL_PCLKMUX_REG(id) (0x21c + (id * 0x80))
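+
+/*
+ * Each ATL instance occupies a 0x80-byte register window starting at
+ * 0x200; e.g. DRA7_ATL_SWEN_REG(2) resolves to 0x210 + 2 * 0x80 = 0x310.
+ */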
+
+#define DRA7_ATL_SWEN BIT(0)
+#define DRA7_ATL_DIVIDER_MASK (0x1f)
+#define DRA7_ATL_PCLKMUX BIT(0)
+struct dra7_atl_clock_info;
+
+struct dra7_atl_desc {
+ struct clk *clk;
+ struct clk_hw hw;
+ struct dra7_atl_clock_info *cinfo;
+ int id;
+
+ bool probed; /* the driver for the IP has been loaded */
+ bool valid; /* configured */
+ bool enabled;
+ u32 bws; /* Baseband Word Select Mux */
+ u32 aws; /* Audio Word Select Mux */
+ u32 divider; /* Cached divider value */
+};
+
+struct dra7_atl_clock_info {
+ struct device *dev;
+ void __iomem *iobase;
+
+ struct dra7_atl_desc *cdesc;
+};
+
+#define to_atl_desc(_hw) container_of(_hw, struct dra7_atl_desc, hw)
+
+static inline void atl_write(struct dra7_atl_clock_info *cinfo, u32 reg,
+ u32 val)
+{
+ __raw_writel(val, cinfo->iobase + reg);
+}
+
+static inline int atl_read(struct dra7_atl_clock_info *cinfo, u32 reg)
+{
+ return __raw_readl(cinfo->iobase + reg);
+}
+
+static int atl_clk_enable(struct clk_hw *hw)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ if (!cdesc->probed)
+ goto out;
+
+ if (unlikely(!cdesc->valid))
+ dev_warn(cdesc->cinfo->dev, "atl%d has not been configured\n",
+ cdesc->id);
+ pm_runtime_get_sync(cdesc->cinfo->dev);
+
+ atl_write(cdesc->cinfo, DRA7_ATL_ATLCR_REG(cdesc->id),
+ cdesc->divider - 1);
+ atl_write(cdesc->cinfo, DRA7_ATL_SWEN_REG(cdesc->id), DRA7_ATL_SWEN);
+
+out:
+ cdesc->enabled = true;
+
+ return 0;
+}
+
+static void atl_clk_disable(struct clk_hw *hw)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ if (!cdesc->probed)
+ goto out;
+
+ atl_write(cdesc->cinfo, DRA7_ATL_SWEN_REG(cdesc->id), 0);
+ pm_runtime_put_sync(cdesc->cinfo->dev);
+
+out:
+ cdesc->enabled = false;
+}
+
+static int atl_clk_is_enabled(struct clk_hw *hw)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ return cdesc->enabled;
+}
+
+static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ return parent_rate / cdesc->divider;
+}
+
+static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned divider;
+
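+	/*
+	 * The divider is rounded to nearest: e.g. for a hypothetical
+	 * 12.288 MHz parent and a 1.536 MHz request,
+	 * divider = (12288000 + 768000) / 1536000 = 8, which hits the
+	 * requested rate exactly (12288000 / 8 = 1536000).
+	 */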
+ divider = (*parent_rate + rate / 2) / rate;
+ if (divider > DRA7_ATL_DIVIDER_MASK + 1)
+ divider = DRA7_ATL_DIVIDER_MASK + 1;
+
+ return *parent_rate / divider;
+}
+
+static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+ u32 divider;
+
+ divider = ((parent_rate + rate / 2) / rate) - 1;
+ if (divider > DRA7_ATL_DIVIDER_MASK)
+ divider = DRA7_ATL_DIVIDER_MASK;
+
+ cdesc->divider = divider + 1;
+
+ return 0;
+}
+
+static const struct clk_ops atl_clk_ops = {
+ .enable = atl_clk_enable,
+ .disable = atl_clk_disable,
+ .is_enabled = atl_clk_is_enabled,
+ .recalc_rate = atl_clk_recalc_rate,
+ .round_rate = atl_clk_round_rate,
+ .set_rate = atl_clk_set_rate,
+};
+
+static void __init of_dra7_atl_clock_setup(struct device_node *node)
+{
+ struct dra7_atl_desc *clk_hw = NULL;
+ struct clk_init_data init = { 0 };
+ const char **parent_names = NULL;
+ struct clk *clk;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw) {
+ pr_err("%s: could not allocate dra7_atl_desc\n", __func__);
+ return;
+ }
+
+ clk_hw->hw.init = &init;
+ clk_hw->divider = 1;
+ init.name = node->name;
+ init.ops = &atl_clk_ops;
+ init.flags = CLK_IGNORE_UNUSED;
+ init.num_parents = of_clk_get_parent_count(node);
+
+ if (init.num_parents != 1) {
+ pr_err("%s: atl clock %s must have 1 parent\n", __func__,
+ node->name);
+ goto cleanup;
+ }
+
+ parent_names = kzalloc(sizeof(char *), GFP_KERNEL);
+
+ if (!parent_names)
+ goto cleanup;
+
+ parent_names[0] = of_clk_get_parent_name(node, 0);
+
+ init.parent_names = parent_names;
+
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return;
+ }
+cleanup:
+ kfree(parent_names);
+ kfree(clk_hw);
+}
+CLK_OF_DECLARE(dra7_atl_clock, "ti,dra7-atl-clock", of_dra7_atl_clock_setup);
+
+static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct dra7_atl_clock_info *cinfo;
+ int i;
+ int ret = 0;
+
+ if (!node)
+ return -ENODEV;
+
+ cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ cinfo->iobase = of_iomap(node, 0);
+ cinfo->dev = &pdev->dev;
+ pm_runtime_enable(cinfo->dev);
+
+ pm_runtime_get_sync(cinfo->dev);
+ atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
+
+ for (i = 0; i < DRA7_ATL_INSTANCES; i++) {
+ struct device_node *cfg_node;
+ char prop[5];
+ struct dra7_atl_desc *cdesc;
+ struct of_phandle_args clkspec;
+ struct clk *clk;
+ int rc;
+
+ rc = of_parse_phandle_with_args(node, "ti,provided-clocks",
+ NULL, i, &clkspec);
+
+ if (rc) {
+ pr_err("%s: failed to lookup atl clock %d\n", __func__,
+ i);
+ return -EINVAL;
+ }
+
+ clk = of_clk_get_from_provider(&clkspec);
+
+ cdesc = to_atl_desc(__clk_get_hw(clk));
+ cdesc->cinfo = cinfo;
+ cdesc->id = i;
+
+ /* Get configuration for the ATL instances */
+ snprintf(prop, sizeof(prop), "atl%u", i);
+ cfg_node = of_find_node_by_name(node, prop);
+ if (cfg_node) {
+ ret = of_property_read_u32(cfg_node, "bws",
+ &cdesc->bws);
+ ret |= of_property_read_u32(cfg_node, "aws",
+ &cdesc->aws);
+ if (!ret) {
+ cdesc->valid = true;
+ atl_write(cinfo, DRA7_ATL_BWSMUX_REG(i),
+ cdesc->bws);
+ atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
+ cdesc->aws);
+ }
+ }
+
+ cdesc->probed = true;
+ /*
+		 * Enable the clock if it was requested before the hw
+		 * driver was loaded
+ */
+ if (cdesc->enabled)
+ atl_clk_enable(__clk_get_hw(clk));
+ }
+ pm_runtime_put_sync(cinfo->dev);
+
+ return ret;
+}
+
+static int of_dra7_atl_clk_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct of_device_id of_dra7_atl_clk_match_tbl[] = {
+ { .compatible = "ti,dra7-atl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7_atl_clk_match_tbl);
+
+static struct platform_driver dra7_atl_clk_driver = {
+ .driver = {
+ .name = "dra7-atl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_dra7_atl_clk_match_tbl,
+ },
+ .probe = of_dra7_atl_clk_probe,
+ .remove = of_dra7_atl_clk_remove,
+};
+
+module_platform_driver(dra7_atl_clk_driver);
+
+MODULE_DESCRIPTION("Clock driver for DRA7 Audio Tracking Logic");
+MODULE_ALIAS("platform:dra7-atl-clock");
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 7e498a44f97d..79791e1bf282 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -25,8 +25,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define DPLL_HAS_AUTOIDLE 0x1
-
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
defined(CONFIG_SOC_DRA7XX)
static const struct clk_ops dpll_m4xen_ck_ops = {
@@ -37,21 +35,18 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.set_rate = &omap3_noncore_dpll_set_rate,
.get_parent = &omap2_init_dpll_parent,
};
+#else
+static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
+ defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
+ defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_core_ck_ops = {
.recalc_rate = &omap3_dpll_recalc,
.get_parent = &omap2_init_dpll_parent,
};
-#ifdef CONFIG_ARCH_OMAP3
-static const struct clk_ops omap3_dpll_core_ck_ops = {
- .get_parent = &omap2_init_dpll_parent,
- .recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
-};
-#endif
-
static const struct clk_ops dpll_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
@@ -67,6 +62,33 @@ static const struct clk_ops dpll_no_gate_ck_ops = {
.round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
};
+#else
+static const struct clk_ops dpll_core_ck_ops = {};
+static const struct clk_ops dpll_ck_ops = {};
+static const struct clk_ops dpll_no_gate_ck_ops = {};
+const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
+#endif
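+
+/*
+ * The empty stubs above keep the CLK_OF_DECLARE() setup paths below
+ * buildable on configurations that do not provide the real DPLL
+ * operations.
+ */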
+
+#ifdef CONFIG_ARCH_OMAP2
+static const struct clk_ops omap2_dpll_core_ck_ops = {
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap2_dpllcore_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap2_reprogram_dpllcore,
+};
+#else
+static const struct clk_ops omap2_dpll_core_ck_ops = {};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_core_ck_ops = {
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#else
+static const struct clk_ops omap3_dpll_core_ck_ops = {};
+#endif
#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
@@ -139,7 +161,8 @@ cleanup:
}
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
- defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
+ defined(CONFIG_SOC_AM43XX)
/**
* ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
* @node: device node for this clock
@@ -193,14 +216,12 @@ static void ti_clk_register_dpll_x2(struct device_node *node,
* @node: device node containing the DPLL info
* @ops: ops for the DPLL
* @ddt: DPLL data template to use
- * @init_flags: flags for controlling init types
*
* Initializes a DPLL clock from device tree data.
*/
static void __init of_ti_dpll_setup(struct device_node *node,
const struct clk_ops *ops,
- const struct dpll_data *ddt,
- u8 init_flags)
+ const struct dpll_data *ddt)
{
struct clk_hw_omap *clk_hw = NULL;
struct clk_init_data *init = NULL;
@@ -241,13 +262,30 @@ static void __init of_ti_dpll_setup(struct device_node *node,
init->parent_names = parent_names;
dd->control_reg = ti_clk_get_reg_addr(node, 0);
- dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
- dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
- if (!dd->control_reg || !dd->idlest_reg || !dd->mult_div1_reg)
+ /*
+ * Special case for OMAP2 DPLL, register order is different due to
+ * missing idlest_reg, also clkhwops is different. Detected from
+ * missing idlest_mask.
+ */
+ if (!dd->idlest_mask) {
+ dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
+#ifdef CONFIG_ARCH_OMAP2
+ clk_hw->ops = &clkhwops_omap2xxx_dpll;
+ omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
+#endif
+ } else {
+ dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
+ if (!dd->idlest_reg)
+ goto cleanup;
+
+ dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
+ }
+
+ if (!dd->control_reg || !dd->mult_div1_reg)
goto cleanup;
- if (init_flags & DPLL_HAS_AUTOIDLE) {
+ if (dd->autoidle_mask) {
dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
if (!dd->autoidle_reg)
goto cleanup;
@@ -285,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
of_ti_omap4_dpll_x2_setup);
#endif
-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
@@ -310,7 +348,7 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
of_ti_omap3_dpll_setup);
@@ -329,7 +367,7 @@ static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
.freqsel_mask = 0xf0,
};
- of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
of_ti_omap3_core_dpll_setup);
@@ -349,7 +387,7 @@ static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
of_ti_omap3_per_dpll_setup);
@@ -371,7 +409,7 @@ static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
of_ti_omap3_per_jtype_dpll_setup);
@@ -391,11 +429,32 @@ static void __init of_ti_omap4_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
of_ti_omap4_dpll_setup);
+static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .dcc_mask = BIT(22),
+ .dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
+}
+CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
+ of_ti_omap5_mpu_dpll_setup);
+
static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
const struct dpll_data dd = {
@@ -410,7 +469,7 @@ static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
of_ti_omap4_core_dpll_setup);
@@ -433,7 +492,7 @@ static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
of_ti_omap4_m4xen_dpll_setup);
@@ -454,7 +513,7 @@ static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
of_ti_omap4_jtype_dpll_setup);
@@ -465,7 +524,6 @@ static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
const struct dpll_data dd = {
.idlest_mask = 0x1,
.enable_mask = 0x7,
- .autoidle_mask = 0x7,
.mult_mask = 0x7ff << 8,
.div1_mask = 0x7f,
.max_multiplier = 2047,
@@ -474,7 +532,7 @@ static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
of_ti_am3_no_gate_dpll_setup);
@@ -484,7 +542,6 @@ static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
const struct dpll_data dd = {
.idlest_mask = 0x1,
.enable_mask = 0x7,
- .autoidle_mask = 0x7,
.mult_mask = 0x7ff << 8,
.div1_mask = 0x7f,
.max_multiplier = 4095,
@@ -494,7 +551,7 @@ static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
of_ti_am3_jtype_dpll_setup);
@@ -504,7 +561,6 @@ static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
const struct dpll_data dd = {
.idlest_mask = 0x1,
.enable_mask = 0x7,
- .autoidle_mask = 0x7,
.mult_mask = 0x7ff << 8,
.div1_mask = 0x7f,
.max_multiplier = 2047,
@@ -514,7 +570,7 @@ static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
"ti,am3-dpll-no-gate-j-type-clock",
@@ -525,7 +581,6 @@ static void __init of_ti_am3_dpll_setup(struct device_node *node)
const struct dpll_data dd = {
.idlest_mask = 0x1,
.enable_mask = 0x7,
- .autoidle_mask = 0x7,
.mult_mask = 0x7ff << 8,
.div1_mask = 0x7f,
.max_multiplier = 2047,
@@ -534,7 +589,7 @@ static void __init of_ti_am3_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
@@ -543,7 +598,6 @@ static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
const struct dpll_data dd = {
.idlest_mask = 0x1,
.enable_mask = 0x7,
- .autoidle_mask = 0x7,
.mult_mask = 0x7ff << 8,
.div1_mask = 0x7f,
.max_multiplier = 2047,
@@ -552,7 +606,22 @@ static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, 0);
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
of_ti_am3_core_dpll_setup);
+
+static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .enable_mask = 0x3,
+ .mult_mask = 0x3ff << 12,
+ .div1_mask = 0xf << 8,
+ .max_divider = 16,
+ .min_divider = 1,
+ };
+
+ of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
+ of_ti_omap2_core_dpll_setup);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 58734817d502..b326d2797feb 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -185,7 +185,7 @@ of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock",
of_ti_composite_no_wait_gate_clk_setup);
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
{
_of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 320a2b168bb2..9c3e8c4aaa40 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -94,6 +94,7 @@ static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock",
of_ti_no_wait_interface_clk_setup);
+#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
{
_of_ti_interface_clk_setup(node,
@@ -123,3 +124,13 @@ static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
}
CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock",
of_ti_am35xx_interface_clk_setup);
+#endif
+
+#ifdef CONFIG_SOC_OMAP2430
+static void __init of_ti_omap2430_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_omap2430_i2chs_wait);
+}
+CLK_OF_DECLARE(ti_omap2430_interface_clk, "ti,omap2430-interface-clock",
+ of_ti_omap2430_interface_clk_setup);
+#endif
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 0197a478720c..e9d650e51287 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node)
u8 clk_mux_flags = 0;
u32 mask = 0;
u32 shift = 0;
- u32 flags = 0;
+ u32 flags = CLK_SET_RATE_NO_REPARENT;
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 8d6420013a04..ab51bf20a3ed 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -153,19 +153,16 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
}
/* Clocksource handling */
-static void exynos4_mct_frc_start(u32 hi, u32 lo)
+static void exynos4_mct_frc_start(void)
{
u32 reg;
- exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
- exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
-
reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
reg |= MCT_G_TCON_START;
exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static cycle_t notrace _exynos4_frc_read(void)
{
unsigned int lo, hi;
u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
@@ -179,9 +176,14 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
return ((cycle_t)hi << 32) | lo;
}
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+ return _exynos4_frc_read();
+}
+
static void exynos4_frc_resume(struct clocksource *cs)
{
- exynos4_mct_frc_start(0, 0);
+ exynos4_mct_frc_start();
}
struct clocksource mct_frc = {
@@ -195,12 +197,23 @@ struct clocksource mct_frc = {
static u64 notrace exynos4_read_sched_clock(void)
{
- return exynos4_frc_read(&mct_frc);
+ return _exynos4_frc_read();
+}
+
+static struct delay_timer exynos4_delay_timer;
+
+static cycles_t exynos4_read_current_timer(void)
+{
+ return _exynos4_frc_read();
}
static void __init exynos4_clocksource_init(void)
{
- exynos4_mct_frc_start(0, 0);
+ exynos4_mct_frc_start();
+
+ exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
+ exynos4_delay_timer.freq = clk_rate;
+ register_current_timer_delay(&exynos4_delay_timer);
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
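
The _exynos4_frc_read() helper split out above is the usual lock-free
idiom for reading a 64-bit counter exposed as two 32-bit registers: read
the high word, the low word, then the high word again, and retry if the
high word changed in between. The notrace variant can then feed
sched_clock and the new udelay() delay timer without tracing overhead.
A generic sketch of the idiom (register handles illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Retry until 'hi' is stable across the 'lo' read, so the pair is
     * coherent even if the low word wraps mid-read. */
    static u64 example_read_split_counter(void __iomem *lo_reg,
                                          void __iomem *hi_reg)
    {
            u32 lo, hi, hi2;

            hi2 = readl(hi_reg);
            do {
                    hi = hi2;
                    lo = readl(lo_reg);
                    hi2 = readl(hi_reg);
            } while (hi != hi2);

            return ((u64)hi << 32) | lo;
    }
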
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 1fbe11f2a146..ffe350f86bca 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,9 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
- depends on HAVE_CLK && REGULATOR && OF && THERMAL && CPU_THERMAL
+ depends on HAVE_CLK && OF
+ # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
+ depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds a generic cpufreq driver for CPU0 frequency management.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 36d20d0fce27..ebac67115009 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,8 +5,7 @@
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
- depends on HAVE_CLK
+ depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 738c8b7b17dc..db6d9a2fea4d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
-obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o
+obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 09b9129c7bd3..ee1ae303a07c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -104,7 +104,7 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver cpu0_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpu0_set_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ae11dd51f81d..62259d27f03e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1816,20 +1816,55 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* GOVERNORS *
*********************************************************************/
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int index)
+{
+ int ret;
+
+ freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+ /* We don't need to switch to intermediate freq */
+ if (!freqs->new)
+ return 0;
+
+ pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+ __func__, policy->cpu, freqs->old, freqs->new);
+
+ cpufreq_freq_transition_begin(policy, freqs);
+ ret = cpufreq_driver->target_intermediate(policy, index);
+ cpufreq_freq_transition_end(policy, freqs, ret);
+
+ if (ret)
+ pr_err("%s: Failed to change to intermediate frequency: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
static int __target_index(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *freq_table, int index)
{
- struct cpufreq_freqs freqs;
+ struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+ unsigned int intermediate_freq = 0;
int retval = -EINVAL;
bool notify;
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
-
if (notify) {
- freqs.old = policy->cur;
- freqs.new = freq_table[index].frequency;
- freqs.flags = 0;
+ /* Handle switching to intermediate frequency */
+ if (cpufreq_driver->get_intermediate) {
+ retval = __target_intermediate(policy, &freqs, index);
+ if (retval)
+ return retval;
+
+ intermediate_freq = freqs.new;
+ /* Set old freq to intermediate */
+ if (intermediate_freq)
+ freqs.old = freqs.new;
+ }
+ freqs.new = freq_table[index].frequency;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
__func__, policy->cpu, freqs.old, freqs.new);
@@ -1841,9 +1876,23 @@ static int __target_index(struct cpufreq_policy *policy,
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
retval);
- if (notify)
+ if (notify) {
cpufreq_freq_transition_end(policy, &freqs, retval);
+ /*
+ * Failed after switching to the intermediate freq? The driver
+ * should have reverted to the initial frequency, and so should
+ * we. Check for intermediate_freq here instead of
+ * get_intermediate, in case we haven't switched to the
+ * intermediate freq at all.
+ */
+ if (unlikely(retval && intermediate_freq)) {
+ freqs.old = intermediate_freq;
+ freqs.new = policy->restore_freq;
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ }
+ }
+
return retval;
}
@@ -1875,6 +1924,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (target_freq == policy->cur)
return 0;
+ /* Save last value to restore later on errors */
+ policy->restore_freq = policy->cur;
+
if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
else if (cpufreq_driver->target_index) {
@@ -2190,10 +2242,8 @@ int cpufreq_update_policy(unsigned int cpu)
struct cpufreq_policy new_policy;
int ret;
- if (!policy) {
- ret = -ENODEV;
- goto no_policy;
- }
+ if (!policy)
+ return -ENODEV;
down_write(&policy->rwsem);
@@ -2212,7 +2262,7 @@ int cpufreq_update_policy(unsigned int cpu)
new_policy.cur = cpufreq_driver->get(cpu);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;
- goto no_policy;
+ goto unlock;
}
if (!policy->cur) {
@@ -2227,10 +2277,10 @@ int cpufreq_update_policy(unsigned int cpu)
ret = cpufreq_set_policy(policy, &new_policy);
+unlock:
up_write(&policy->rwsem);
cpufreq_cpu_put(policy);
-no_policy:
return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2361,7 +2411,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
(driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)))
+ driver_data->target)) ||
+ (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e1c6433b16e0..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -36,14 +36,29 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
struct cpufreq_policy *policy;
+ unsigned int sampling_rate;
unsigned int max_load = 0;
unsigned int ignore_nice;
unsigned int j;
- if (dbs_data->cdata->governor == GOV_ONDEMAND)
+ if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+ struct od_cpu_dbs_info_s *od_dbs_info =
+ dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+
+ /*
+ * The ondemand governor sometimes applies an additional
+ * multiplier to stretch its sampling delays. Apply that
+ * multiplier to 'sampling_rate' here too, so that the
+ * wake-up-from-idle detection logic stays suitably conservative.
+ */
+ sampling_rate = od_tuners->sampling_rate;
+ sampling_rate *= od_dbs_info->rate_mult;
+
ignore_nice = od_tuners->ignore_nice_load;
- else
+ } else {
+ sampling_rate = cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice_load;
+ }
policy = cdbs->cur_policy;
@@ -96,7 +111,46 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
if (unlikely(!wall_time || wall_time < idle_time))
continue;
- load = 100 * (wall_time - idle_time) / wall_time;
+ /*
+ * If the CPU had gone completely idle, and a task just woke up
+ * on this CPU now, it would be unfair to calculate 'load' the
+ * usual way for this elapsed time-window, because it will show
+ * near-zero load, irrespective of how CPU intensive that task
+ * actually is. This is undesirable for latency-sensitive bursty
+ * workloads.
+ *
+ * To avoid this, we reuse the 'load' from the previous
+ * time-window and give this task a chance to start with a
+ * reasonably high CPU frequency. (However, we shouldn't overdo
+ * this copy, lest we get stuck at a high load (high frequency)
+ * for too long, even when the current system load has actually
+ * dropped down. So we perform the copy only once, upon the
+ * first wake-up from idle.)
+ *
+ * Detecting this situation is easy: the governor's deferrable
+ * timer would not have fired during CPU-idle periods. Hence
+ * an unusually large 'wall_time' (as compared to the sampling
+ * rate) indicates this scenario.
+ *
+ * prev_load can be zero in two cases, and we must recalculate it
+ * for both:
+ * - during long idle intervals
+ * - when it was explicitly set to zero
+ */
+ if (unlikely(wall_time > (2 * sampling_rate) &&
+ j_cdbs->prev_load)) {
+ load = j_cdbs->prev_load;
+
+ /*
+ * Perform a destructive copy, to ensure that we copy
+ * the previous load only once, upon the first wake-up
+ * from idle.
+ */
+ j_cdbs->prev_load = 0;
+ } else {
+ load = 100 * (wall_time - idle_time) / wall_time;
+ j_cdbs->prev_load = load;
+ }
if (load > max_load)
max_load = load;
@@ -318,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs =
dbs_data->cdata->get_cpu_cdbs(j);
+ unsigned int prev_load;
j_cdbs->cpu = j;
j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
&j_cdbs->prev_cpu_wall, io_busy);
+
+ prev_load = (unsigned int)
+ (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
+ j_cdbs->prev_load = 100 * prev_load /
+ (unsigned int) j_cdbs->prev_cpu_wall;
+
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
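
A worked example of the heuristic: with a sampling rate of 10000 us and
rate_mult == 1, a governor timer that last ran 500 ms ago sees a wall_time
of roughly 500000 us, far above 2 * sampling_rate == 20000 us, so the
near-zero measured load is discarded in favour of prev_load, exactly once.
The decision, reduced to a self-contained sketch:

    /* Copy-once load reuse after a long idle window (times in usecs) */
    static unsigned int example_effective_load(unsigned int wall_time,
                                               unsigned int idle_time,
                                               unsigned int sampling_rate,
                                               unsigned int *prev_load)
    {
            unsigned int load;

            if (wall_time > 2 * sampling_rate && *prev_load) {
                    load = *prev_load;
                    *prev_load = 0; /* destructive copy: reuse only once */
            } else {
                    load = 100 * (wall_time - idle_time) / wall_time;
                    *prev_load = load;
            }
            return load;
    }
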
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bfb9ae14142c..cc401d147e72 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -134,6 +134,13 @@ struct cpu_dbs_common_info {
u64 prev_cpu_idle;
u64 prev_cpu_wall;
u64 prev_cpu_nice;
+ /*
+ * Used to keep track of load in the previous interval. However, when
+ * explicitly set to zero, it is used as a flag to ensure that we copy
+ * the previous load to the current interval only once, upon the first
+ * wake-up from idle.
+ */
+ unsigned int prev_load;
struct cpufreq_policy *cur_policy;
struct delayed_work work;
/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aebd4572eb6d..86631cb6f7de 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs;
struct perf_limits {
int no_turbo;
+ int turbo_disabled;
int max_perf_pct;
int min_perf_pct;
int32_t max_perf;
@@ -196,10 +197,7 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
pid->last_err = fp_error;
result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
- if (result >= 0)
- result = result + (1 << (FRAC_BITS-1));
- else
- result = result - (1 << (FRAC_BITS-1));
+ result = result + (1 << (FRAC_BITS-1));
return (signed int)fp_toint(result);
}
@@ -290,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
limits.no_turbo = clamp_t(int, input, 0, 1);
-
+ if (limits.turbo_disabled) {
+ pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ limits.no_turbo = limits.turbo_disabled;
+ }
return count;
}
@@ -360,21 +361,21 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return (value >> 8) & 0x3F;
+ return (value >> 8) & 0x7F;
}
static int byt_get_max_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return (value >> 16) & 0x3F;
+ return (value >> 16) & 0x7F;
}
static int byt_get_turbo_pstate(void)
{
u64 value;
rdmsrl(BYT_TURBO_RATIOS, value);
- return value & 0x3F;
+ return value & 0x7F;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -384,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
u32 vid;
val = pstate << 8;
- if (limits.no_turbo)
+ if (limits.no_turbo && !limits.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -408,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata)
rdmsrl(BYT_VIDS, value);
- cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
- cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
cpudata->vid.max - cpudata->vid.min,
int_tofp(cpudata->pstate.max_pstate -
@@ -451,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
u64 val;
val = pstate << 8;
- if (limits.no_turbo)
+ if (limits.no_turbo && !limits.turbo_disabled)
val |= (u64)1 << 32;
wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -691,23 +692,16 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static int intel_pstate_init_cpu(unsigned int cpunum)
{
-
- const struct x86_cpu_id *id;
struct cpudata *cpu;
- id = x86_match_cpu(intel_pstate_cpu_ids);
- if (!id)
- return -ENODEV;
-
all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
if (!all_cpu_data[cpunum])
return -ENOMEM;
cpu = all_cpu_data[cpunum];
- intel_pstate_get_cpu_pstates(cpu);
-
cpu->cpu = cpunum;
+ intel_pstate_get_cpu_pstates(cpu);
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
@@ -750,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_perf = int_tofp(1);
limits.max_perf_pct = 100;
limits.max_perf = int_tofp(1);
- limits.no_turbo = 0;
+ limits.no_turbo = limits.turbo_disabled;
return 0;
}
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -793,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int rc;
+ u64 misc_en;
rc = intel_pstate_init_cpu(policy->cpu);
if (rc)
@@ -800,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- if (!limits.no_turbo &&
- limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+ limits.turbo_disabled = 1;
+ limits.no_turbo = 1;
+ }
+ if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
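
Two separate fixes land in intel_pstate here: the Baytrail ratio and VID
fields are seven bits wide, so the old 0x3F masks truncated the top bit on
higher-ratio parts, and turbo availability is now probed from
MSR_IA32_MISC_ENABLE rather than assumed. The probe in isolation, as a
sketch (the helper name is made up):

    #include <linux/types.h>
    #include <asm/msr.h>

    /* Hypothetical helper: has the BIOS locked turbo off? */
    static bool example_turbo_locked_off(void)
    {
            u64 misc_en;

            rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
            return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
    }
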
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 0af618abebaf..3607070797af 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
- u64 transition_latency_hz;
+ u64 u64temp;
np = of_get_cpu_node(cpu, NULL);
if (!np)
@@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
for_each_cpu(i, per_cpu(cpu_mask, cpu))
per_cpu(cpu_data, i) = data;
- transition_latency_hz = 12ULL * NSEC_PER_SEC;
- policy->cpuinfo.transition_latency =
- do_div(transition_latency_hz, fsl_get_sys_freq());
+ /* Minimum transition latency is 12 platform clocks */
+ u64temp = 12ULL * NSEC_PER_SEC;
+ do_div(u64temp, fsl_get_sys_freq());
+ policy->cpuinfo.transition_latency = u64temp + 1;
of_node_put(np);
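
The old code stored the return value of do_div(), which is the remainder,
not the quotient; the rewrite keeps the quotient in u64temp and adds one
to round up. Worked example: at a 400 MHz platform clock,
12 * 10^9 / 400000000 == 30, so transition_latency becomes 31 ns. In
sketch form:

    #include <linux/types.h>
    #include <linux/time.h>         /* NSEC_PER_SEC */
    #include <asm/div64.h>

    /* ns for 12 platform clocks, rounded up against integer truncation */
    static u32 example_transition_latency_ns(u32 sys_freq_hz)
    {
            u64 ns = 12ULL * NSEC_PER_SEC;

            do_div(ns, sys_freq_hz);        /* quotient remains in 'ns' */
            return ns + 1;                  /* 400 MHz -> 30 + 1 */
    }
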
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index 6e774c6ac20b..8084c7f7e206 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -45,46 +45,54 @@ static struct clk *cpu_clk;
static struct clk *pll_x_clk;
static struct clk *pll_p_clk;
static struct clk *emc_clk;
+static bool pll_x_prepared;
-static int tegra_cpu_clk_set_rate(unsigned long rate)
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+
+ /*
+ * Don't switch to intermediate freq if:
+ * - we are already at it, i.e. policy->cur == ifreq
+ * - index corresponds to ifreq
+ */
+ if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
+ return 0;
+
+ return ifreq;
+}
+
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
{
int ret;
/*
* Take an extra reference to the main pll so it doesn't turn
- * off when we move the cpu off of it
+ * off when we move the cpu off of it as enabling it again while we
+ * switch to it from tegra_target() would take additional time.
+ *
+ * When target-freq is equal to intermediate freq we don't need to
+ * switch to an intermediate freq and so this routine isn't called.
+ * Also, we wouldn't be using pll_x anymore and must not take extra
+ * reference to it, as it can be disabled now to save some power.
*/
clk_prepare_enable(pll_x_clk);
ret = clk_set_parent(cpu_clk, pll_p_clk);
- if (ret) {
- pr_err("Failed to switch cpu to clock pll_p\n");
- goto out;
- }
-
- if (rate == clk_get_rate(pll_p_clk))
- goto out;
-
- ret = clk_set_rate(pll_x_clk, rate);
- if (ret) {
- pr_err("Failed to change pll_x to %lu\n", rate);
- goto out;
- }
-
- ret = clk_set_parent(cpu_clk, pll_x_clk);
- if (ret) {
- pr_err("Failed to switch cpu to clock pll_x\n");
- goto out;
- }
+ if (ret)
+ clk_disable_unprepare(pll_x_clk);
+ else
+ pll_x_prepared = true;
-out:
- clk_disable_unprepare(pll_x_clk);
return ret;
}
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long rate = freq_table[index].frequency;
+ unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
int ret = 0;
/*
@@ -98,10 +106,30 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
else
clk_set_rate(emc_clk, 100000000); /* emc 50MHz */
- ret = tegra_cpu_clk_set_rate(rate * 1000);
+ /*
+ * target freq == pll_p, don't need to take extra reference to pll_x_clk
+ * as it isn't used anymore.
+ */
+ if (rate == ifreq)
+ return clk_set_parent(cpu_clk, pll_p_clk);
+
+ ret = clk_set_rate(pll_x_clk, rate * 1000);
+ /* Restore to earlier frequency on error, i.e. pll_x */
if (ret)
- pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
- rate);
+ pr_err("Failed to change pll_x to %lu\n", rate);
+
+ ret = clk_set_parent(cpu_clk, pll_x_clk);
+ /* This shouldn't fail while changing or restoring */
+ WARN_ON(ret);
+
+ /*
+ * Drop count to pll_x clock only if we switched to intermediate freq
+ * earlier while transitioning to a target frequency.
+ */
+ if (pll_x_prepared) {
+ clk_disable_unprepare(pll_x_clk);
+ pll_x_prepared = false;
+ }
return ret;
}
@@ -137,16 +165,18 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver tegra_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = tegra_target,
- .get = cpufreq_generic_get,
- .init = tegra_cpu_init,
- .exit = tegra_cpu_exit,
- .name = "tegra",
- .attr = cpufreq_generic_attr,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .get_intermediate = tegra_get_intermediate,
+ .target_intermediate = tegra_target_intermediate,
+ .target_index = tegra_target,
+ .get = cpufreq_generic_get,
+ .init = tegra_cpu_init,
+ .exit = tegra_cpu_exit,
+ .name = "tegra",
+ .attr = cpufreq_generic_attr,
#ifdef CONFIG_PM
- .suspend = cpufreq_generic_suspend,
+ .suspend = cpufreq_generic_suspend,
#endif
};
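
Taken together, a Tegra transition between two pll_x rates now follows a
fixed sequence driven by the cpufreq core; sketched below as comments,
matching the callbacks above:

    /*
     * Assumed flow for old_rate -> new_rate, both sourced from pll_x:
     *
     *   tegra_get_intermediate()    - return the pll_p rate, or 0 when
     *                                 already on pll_p or targeting it
     *   tegra_target_intermediate() - take an extra pll_x reference,
     *                                 then reparent the CPU to pll_p
     *   tegra_target()              - retune pll_x at the new rate,
     *                                 reparent the CPU back to pll_x,
     *                                 then drop the extra reference
     *
     * If anything fails after the intermediate switch, the core emits a
     * synthetic transition back to policy->restore_freq; see
     * __target_index() in the cpufreq core diff earlier in this merge.
     */
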
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index f04e25f6c98d..1b96fb91d32c 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -35,6 +35,11 @@ depends on ARM
source "drivers/cpuidle/Kconfig.arm"
endmenu
+menu "MIPS CPU Idle Drivers"
+depends on MIPS
+source "drivers/cpuidle/Kconfig.mips"
+endmenu
+
menu "POWERPC CPU Idle Drivers"
depends on PPC
source "drivers/cpuidle/Kconfig.powerpc"
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
new file mode 100644
index 000000000000..0e70ee28a5ca
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.mips
@@ -0,0 +1,17 @@
+#
+# MIPS CPU Idle Drivers
+#
+config MIPS_CPS_CPUIDLE
+ bool "CPU Idle driver for MIPS CPS platforms"
+ depends on CPU_IDLE
+ depends on SYS_SUPPORTS_MIPS_CPS
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select MIPS_CPS_PM
+ default y
+ help
+ Select this option to enable processor idle state management
+ through cpuidle for systems built around the MIPS Coherent
+ Processing System (CPS) architecture. In order to make use of
+ the deepest idle states you will need to ensure that you are
+ also using the CONFIG_MIPS_CPS SMP implementation.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9b5b2b560d70..d8bb1ff72561 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -18,6 +18,10 @@ obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
###############################################################################
+# MIPS drivers
+obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
+
+###############################################################################
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
diff --git a/drivers/cpuidle/cpuidle-armada-370-xp.c b/drivers/cpuidle/cpuidle-armada-370-xp.c
index 28587d0f3947..a5fba0287bfb 100644
--- a/drivers/cpuidle/cpuidle-armada-370-xp.c
+++ b/drivers/cpuidle/cpuidle-armada-370-xp.c
@@ -55,7 +55,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
.power_usage = 50,
.target_residency = 100,
.flags = CPUIDLE_FLAG_TIME_VALID,
- .name = "MV CPU IDLE",
+ .name = "Idle",
.desc = "CPU power down",
},
.states[2] = {
@@ -65,7 +65,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
.target_residency = 1000,
.flags = CPUIDLE_FLAG_TIME_VALID |
ARMADA_370_XP_FLAG_DEEP_IDLE,
- .name = "MV CPU DEEP IDLE",
+ .name = "Deep idle",
.desc = "CPU and L2 Fabric power down",
},
.state_count = ARMADA_370_XP_MAX_STATES,
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
new file mode 100644
index 000000000000..fc7b62720deb
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/init.h>
+
+#include <asm/idle.h>
+#include <asm/pm-cps.h>
+
+/* Enumeration of the various idle states this driver may enter */
+enum cps_idle_state {
+ STATE_WAIT = 0, /* MIPS wait instruction, coherent */
+ STATE_NC_WAIT, /* MIPS wait instruction, non-coherent */
+ STATE_CLOCK_GATED, /* Core clock gated */
+ STATE_POWER_GATED, /* Core power gated */
+ STATE_COUNT
+};
+
+static int cps_nc_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ enum cps_pm_state pm_state;
+ int err;
+
+ /*
+ * At least one core must remain powered up & clocked in order for the
+ * system to have any hope of functioning.
+ *
+ * TODO: don't treat core 0 specially, just prevent the final core
+ * TODO: remap interrupt affinity temporarily
+ */
+ if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
+ index = STATE_NC_WAIT;
+
+ /* Select the appropriate cps_pm_state */
+ switch (index) {
+ case STATE_NC_WAIT:
+ pm_state = CPS_PM_NC_WAIT;
+ break;
+ case STATE_CLOCK_GATED:
+ pm_state = CPS_PM_CLOCK_GATED;
+ break;
+ case STATE_POWER_GATED:
+ pm_state = CPS_PM_POWER_GATED;
+ break;
+ default:
+ BUG();
+ return -EINVAL;
+ }
+
+ /* Notify listeners the CPU is about to power down */
+ if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
+ return -EINTR;
+
+ /* Enter that state */
+ err = cps_pm_enter_state(pm_state);
+
+ /* Notify listeners the CPU is back up */
+ if (pm_state == CPS_PM_POWER_GATED)
+ cpu_pm_exit();
+
+ return err ?: index;
+}
+
+static struct cpuidle_driver cps_driver = {
+ .name = "cpc_cpuidle",
+ .owner = THIS_MODULE,
+ .states = {
+ [STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE,
+ [STATE_NC_WAIT] = {
+ .enter = cps_nc_enter,
+ .exit_latency = 200,
+ .target_residency = 450,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "nc-wait",
+ .desc = "non-coherent MIPS wait",
+ },
+ [STATE_CLOCK_GATED] = {
+ .enter = cps_nc_enter,
+ .exit_latency = 300,
+ .target_residency = 700,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "clock-gated",
+ .desc = "core clock gated",
+ },
+ [STATE_POWER_GATED] = {
+ .enter = cps_nc_enter,
+ .exit_latency = 600,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "power-gated",
+ .desc = "core power gated",
+ },
+ },
+ .state_count = STATE_COUNT,
+ .safe_state_index = 0,
+};
+
+static void __init cps_cpuidle_unregister(void)
+{
+ int cpu;
+ struct cpuidle_device *device;
+
+ for_each_possible_cpu(cpu) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ cpuidle_unregister_device(device);
+ }
+
+ cpuidle_unregister_driver(&cps_driver);
+}
+
+static int __init cps_cpuidle_init(void)
+{
+ int err, cpu, core, i;
+ struct cpuidle_device *device;
+
+ /* Detect supported states */
+ if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+ cps_driver.state_count = STATE_CLOCK_GATED + 1;
+ if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
+ cps_driver.state_count = STATE_NC_WAIT + 1;
+ if (!cps_pm_support_state(CPS_PM_NC_WAIT))
+ cps_driver.state_count = STATE_WAIT + 1;
+
+ /* Inform the user if some states are unavailable */
+ if (cps_driver.state_count < STATE_COUNT) {
+ pr_info("cpuidle-cps: limited to ");
+ switch (cps_driver.state_count - 1) {
+ case STATE_WAIT:
+ pr_cont("coherent wait\n");
+ break;
+ case STATE_NC_WAIT:
+ pr_cont("non-coherent wait\n");
+ break;
+ case STATE_CLOCK_GATED:
+ pr_cont("clock gating\n");
+ break;
+ }
+ }
+
+ /*
+ * Set the coupled flag on the appropriate states if this system
+ * requires it.
+ */
+ if (coupled_coherence)
+ for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
+ cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;
+
+ err = cpuidle_register_driver(&cps_driver);
+ if (err) {
+ pr_err("Failed to register CPS cpuidle driver\n");
+ return err;
+ }
+
+ for_each_possible_cpu(cpu) {
+ core = cpu_data[cpu].core;
+ device = &per_cpu(cpuidle_dev, cpu);
+ device->cpu = cpu;
+#ifdef CONFIG_MIPS_MT
+ cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
+#endif
+
+ err = cpuidle_register_device(device);
+ if (err) {
+ pr_err("Failed to register CPU%d cpuidle device\n",
+ cpu);
+ goto err_out;
+ }
+ }
+
+ return 0;
+err_out:
+ cps_cpuidle_unregister();
+ return err;
+}
+device_initcall(cps_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 719f6fb5b1c3..74f5788d50b1 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -73,12 +73,10 @@ static int fastsleep_loop(struct cpuidle_device *dev,
return index;
new_lpcr = old_lpcr;
- new_lpcr &= ~(LPCR_MER | LPCR_PECE); /* lpcr[mer] must be 0 */
-
- /* exit powersave upon external interrupt, but not decrementer
- * interrupt.
+ /* Do not exit powersave upon decrementer as we've setup the timer
+ * offload.
*/
- new_lpcr |= LPCR_PECE0;
+ new_lpcr &= ~LPCR_PECE1;
mtspr(SPRN_LPCR, new_lpcr);
power7_sleep();
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 136d6a283e0a..9634f20e3926 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -187,8 +187,11 @@ static int poll_idle(struct cpuidle_device *dev,
t1 = ktime_get();
local_irq_enable();
- while (!need_resched())
- cpu_relax();
+ if (!current_set_polling_and_test()) {
+ while (!need_resched())
+ cpu_relax();
+ }
+ current_clr_polling();
t2 = ktime_get();
diff = ktime_to_us(ktime_sub(t2, t1));
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f066fa23cc05..02f177aeb16c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -313,7 +313,7 @@ config CRYPTO_DEV_S5P
config CRYPTO_DEV_NX
bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
- depends on PPC64 && IBMVIO
+ depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN
default n
help
Support for Power7+ in-Nest cryptographic acceleration.
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1d80bd3636c5..b512a4ba7569 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev)
int error;
jrdev = &pdev->dev;
- jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
- GFP_KERNEL);
+ jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
+ GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
@@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev)
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
- if (error) {
- kfree(jrpriv);
+ if (error)
return error;
- }
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
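
Moving the allocation to devm_kmalloc() ties its lifetime to the device,
which is what lets the kfree() disappear from the caam_jr_init() error
path. The general shape of the pattern, for a hypothetical probe():

    #include <linux/platform_device.h>

    struct example_priv { int dummy; };             /* hypothetical state */

    static int example_hw_init(struct example_priv *priv); /* hypothetical */

    static int example_probe(struct platform_device *pdev)
    {
            struct example_priv *priv;

            priv = devm_kmalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /*
             * Early returns leak nothing: devres frees the allocation
             * when probe fails or the device is later unbound.
             */
            return example_hw_init(priv);
    }
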
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5c5863842de9..1eca7b9760e6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -234,7 +234,7 @@ config PL330_DMA
config PCH_DMA
tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
- depends on PCI && X86
+ depends on PCI && (X86_32 || COMPILE_TEST)
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
@@ -269,7 +269,7 @@ config MXS_DMA
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
- and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+ and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
config EP93XX_DMA
bool "Cirrus Logic EP93xx DMA support"
@@ -361,6 +361,20 @@ config FSL_EDMA
multiplexing capability for DMA request sources(slot).
This module can be found on Freescale Vybrid and LS-1 SoCs.
+config XILINX_VDMA
+ tristate "Xilinx AXI VDMA Engine"
+ depends on (ARCH_ZYNQ || MICROBLAZE)
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx AXI VDMA Soft IP.
+
+ This engine provides high-bandwidth direct memory access
+ between memory and AXI4-Stream video type target
+ peripherals including peripherals which support AXI4-
+ Stream Video Protocol. It has two stream interfaces/
+ channels, Memory Mapped to Stream (MM2S) and Stream to
+ Memory Mapped (S2MM) for the data transfers.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5150c82c9caf..c779e1eb2db2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-y += xilinx/
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d028f36ae655..8f8b0b608875 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -86,6 +86,9 @@
#define USBSS_IRQ_PD_COMP (1 << 2)
+/* Packet Descriptor */
+#define PD2_ZERO_LENGTH (1 << 19)
+
struct cppi41_channel {
struct dma_chan chan;
struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
__iormb();
while (val) {
- u32 desc;
+ u32 desc, len;
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
q_num, desc);
continue;
}
- c->residue = pd_trans_len(c->desc->pd6) -
- pd_trans_len(c->desc->pd0);
+ if (c->desc->pd2 & PD2_ZERO_LENGTH)
+ len = 0;
+ else
+ len = pd_trans_len(c->desc->pd0);
+
+ c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
c->txd.callback(c->txd.callback_param);
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7a740769c2fa..a27ded53ab4f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1493,6 +1493,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->regs = chip->regs;
chip->dw = dw;
+ dw->clk = devm_clk_get(chip->dev, "hclk");
+ if (IS_ERR(dw->clk))
+ return PTR_ERR(dw->clk);
+ err = clk_prepare_enable(dw->clk);
+ if (err)
+ return err;
+
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
@@ -1500,15 +1507,19 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
if (!pdata && autocfg) {
pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ if (!pdata) {
+ err = -ENOMEM;
+ goto err_pdata;
+ }
/* Fill platform data with the default values */
pdata->is_private = true;
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
- } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
+ } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
+ err = -EINVAL;
+ goto err_pdata;
+ }
if (autocfg)
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
@@ -1517,13 +1528,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
GFP_KERNEL);
- if (!dw->chan)
- return -ENOMEM;
-
- dw->clk = devm_clk_get(chip->dev, "hclk");
- if (IS_ERR(dw->clk))
- return PTR_ERR(dw->clk);
- clk_prepare_enable(dw->clk);
+ if (!dw->chan) {
+ err = -ENOMEM;
+ goto err_pdata;
+ }
/* Get hardware configuration parameters */
if (autocfg) {
@@ -1553,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
dev_err(chip->dev, "No memory for descriptors dma pool\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_pdata;
}
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
@@ -1561,7 +1570,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
"dw_dmac", dw);
if (err)
- return err;
+ goto err_pdata;
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
@@ -1650,12 +1659,20 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
+ err = dma_async_device_register(&dw->dma);
+ if (err)
+ goto err_dma_register;
+
dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
nr_channels);
- dma_async_device_register(&dw->dma);
-
return 0;
+
+err_dma_register:
+ free_irq(chip->irq, dw);
+err_pdata:
+ clk_disable_unprepare(dw->clk);
+ return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
@@ -1676,6 +1693,8 @@ int dw_dma_remove(struct dw_dma_chip *chip)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
+ clk_disable_unprepare(dw->clk);
+
return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
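
The probe() rework above is the standard goto-unwind idiom: acquire
resources in order and, on failure, branch to a label that releases
everything obtained so far in reverse order, so each acquisition has
exactly one cleanup site. Reduced to a skeleton (all helpers
hypothetical):

    static int example_clk_enable(void);    /* hypothetical resources */
    static void example_clk_disable(void);
    static int example_request_irq(void);
    static void example_free_irq(void);
    static int example_register(void);

    static int example_probe(void)
    {
            int err;

            err = example_clk_enable();     /* first resource */
            if (err)
                    return err;

            err = example_request_irq();    /* second resource */
            if (err)
                    goto err_clk;

            err = example_register();       /* final step */
            if (err)
                    goto err_irq;

            return 0;

    err_irq:
            example_free_irq();
    err_clk:
            example_clk_disable();
            return err;
    }
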
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index fec59f1a77bb..39e30c3c7a9d 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -93,19 +93,13 @@ static int dw_pci_resume_early(struct device *dev)
return dw_dma_resume(chip);
};
-#else /* !CONFIG_PM_SLEEP */
-
-#define dw_pci_suspend_late NULL
-#define dw_pci_resume_early NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dw_pci_dev_pm_ops = {
- .suspend_late = dw_pci_suspend_late,
- .resume_early = dw_pci_resume_early,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early)
};
-static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
+static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
{ PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata },
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 453822cc4f9d..c5b339af6be5 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -256,7 +256,7 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#ifdef CONFIG_PM_SLEEP
-static int dw_suspend_noirq(struct device *dev)
+static int dw_suspend_late(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
@@ -264,7 +264,7 @@ static int dw_suspend_noirq(struct device *dev)
return dw_dma_suspend(chip);
}
-static int dw_resume_noirq(struct device *dev)
+static int dw_resume_early(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
@@ -272,20 +272,10 @@ static int dw_resume_noirq(struct device *dev)
return dw_dma_resume(chip);
}
-#else /* !CONFIG_PM_SLEEP */
-
-#define dw_suspend_noirq NULL
-#define dw_resume_noirq NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dw_dev_pm_ops = {
- .suspend_noirq = dw_suspend_noirq,
- .resume_noirq = dw_resume_noirq,
- .freeze_noirq = dw_suspend_noirq,
- .thaw_noirq = dw_resume_noirq,
- .restore_noirq = dw_resume_noirq,
- .poweroff_noirq = dw_suspend_noirq,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};
static struct platform_driver dw_driver = {
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f157c6f76b32..e0fec68aed25 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -61,6 +61,16 @@ static u32 get_sr(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->sr, 32);
}
+static void set_mr(struct fsldma_chan *chan, u32 val)
+{
+ DMA_OUT(chan, &chan->regs->mr, val, 32);
+}
+
+static u32 get_mr(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->mr, 32);
+}
+
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
@@ -71,6 +81,11 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
+static void set_bcr(struct fsldma_chan *chan, u32 val)
+{
+ DMA_OUT(chan, &chan->regs->bcr, val, 32);
+}
+
static u32 get_bcr(struct fsldma_chan *chan)
{
return DMA_IN(chan, &chan->regs->bcr, 32);
@@ -135,7 +150,7 @@ static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
- DMA_OUT(chan, &chan->regs->mr, 0, 32);
+ set_mr(chan, 0);
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
@@ -144,16 +159,15 @@ static void dma_init(struct fsldma_chan *chan)
* EOLNIE - End of links interrupt enable
* BWC - Bandwidth sharing among channels
*/
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
- | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
+ set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
+ | FSL_DMA_MR_EOLNIE);
break;
case FSL_DMA_IP_83XX:
/* Set the channel to below modes:
* EOTIE - End-of-transfer interrupt enable
* PRC_RM - PCI read multiple
*/
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
- | FSL_DMA_MR_PRC_RM, 32);
+ set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
break;
}
}
@@ -175,10 +189,10 @@ static void dma_start(struct fsldma_chan *chan)
{
u32 mode;
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ set_bcr(chan, 0);
mode |= FSL_DMA_MR_EMP_EN;
} else {
mode &= ~FSL_DMA_MR_EMP_EN;
@@ -191,7 +205,7 @@ static void dma_start(struct fsldma_chan *chan)
mode |= FSL_DMA_MR_CS;
}
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
static void dma_halt(struct fsldma_chan *chan)
@@ -200,7 +214,7 @@ static void dma_halt(struct fsldma_chan *chan)
int i;
/* read the mode register */
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
/*
* The 85xx controller supports channel abort, which will stop
@@ -209,14 +223,14 @@ static void dma_halt(struct fsldma_chan *chan)
*/
if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
mode |= FSL_DMA_MR_CA;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
mode &= ~FSL_DMA_MR_CA;
}
/* stop the DMA controller */
mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
/* wait for the DMA controller to become idle */
for (i = 0; i < 100; i++) {
@@ -245,7 +259,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
u32 mode;
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
switch (size) {
case 0:
@@ -259,7 +273,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
break;
}
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
/**
@@ -277,7 +291,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
u32 mode;
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
switch (size) {
case 0:
@@ -291,7 +305,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
break;
}
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
/**
@@ -312,10 +326,10 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
BUG_ON(size > 1024);
- mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode = get_mr(chan);
mode |= (__ilog2(size) << 24) & 0x0f000000;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ set_mr(chan, mode);
}
/**
@@ -404,6 +418,19 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
}
/**
+ * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
+ * @chan : Freescale DMA channel
+ * @desc: descriptor to be freed
+ */
+static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ list_del(&desc->node);
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+}
+
+/**
* fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
* @chan : Freescale DMA channel
*
@@ -426,14 +453,107 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
desc->async_tx.tx_submit = fsl_dma_tx_submit;
desc->async_tx.phys = pdesc;
-#ifdef FSL_DMA_LD_DEBUG
chan_dbg(chan, "LD %p allocated\n", desc);
-#endif
return desc;
}
/**
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
+ * @chan : Freescale DMA channel
+ *
+ * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
+ */
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
+{
+ struct fsl_desc_sw *desc;
+
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ chan_dbg(chan, "no pending LDs\n");
+ return;
+ }
+
+ /*
+ * The DMA controller is not idle, which means that the interrupt
+ * handler will start any queued transactions when it runs after
+ * this transaction finishes
+ */
+ if (!chan->idle) {
+ chan_dbg(chan, "DMA controller still busy\n");
+ return;
+ }
+
+ /*
+ * If there are some link descriptors which have not been
+ * transferred, we need to start the controller
+ */
+
+ /*
+ * Move all elements from the queue of pending transactions
+ * onto the list of running transactions
+ */
+ chan_dbg(chan, "idle, starting controller\n");
+ desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
+ list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
+
+ /*
+ * The 85xx DMA controller doesn't clear the channel start bit
+ * automatically at the end of a transfer. Therefore we must clear
+ * it in software before starting the transfer.
+ */
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ u32 mode;
+
+ mode = get_mr(chan);
+ mode &= ~FSL_DMA_MR_CS;
+ set_mr(chan, mode);
+ }
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_cdar(chan, desc->async_tx.phys);
+ get_cdar(chan);
+
+ dma_start(chan);
+ chan->idle = false;
+}
+
+/**
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
+ */
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+ chan_dbg(chan, "LD %p callback\n", desc);
+ txd->callback(txd->callback_param);
+ }
+
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
+
+ dma_descriptor_unmap(txd);
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
+}
+
+/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
* @chan : Freescale DMA channel
*
@@ -477,13 +597,8 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
{
struct fsl_desc_sw *desc, *_desc;
- list_for_each_entry_safe(desc, _desc, list, node) {
- list_del(&desc->node);
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p free\n", desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
- }
+ list_for_each_entry_safe(desc, _desc, list, node)
+ fsl_dma_free_descriptor(chan, desc);
}
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
@@ -491,13 +606,8 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
{
struct fsl_desc_sw *desc, *_desc;
- list_for_each_entry_safe_reverse(desc, _desc, list, node) {
- list_del(&desc->node);
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p free\n", desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
- }
+ list_for_each_entry_safe_reverse(desc, _desc, list, node)
+ fsl_dma_free_descriptor(chan, desc);
}
/**
@@ -520,35 +630,6 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
}
static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
-{
- struct fsldma_chan *chan;
- struct fsl_desc_sw *new;
-
- if (!dchan)
- return NULL;
-
- chan = to_fsl_chan(dchan);
-
- new = fsl_dma_alloc_descriptor(chan);
- if (!new) {
- chan_err(chan, "%s\n", msg_ld_oom);
- return NULL;
- }
-
- new->async_tx.cookie = -EBUSY;
- new->async_tx.flags = flags;
-
- /* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &new->tx_list);
-
- /* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(chan, new);
-
- return &new->async_tx;
-}
-
-static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
@@ -817,105 +898,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
}
/**
- * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
- * @chan: Freescale DMA channel
- * @desc: descriptor to cleanup and free
- *
- * This function is used on a descriptor which has been executed by the DMA
- * controller. It will run any callbacks, submit any dependencies, and then
- * free the descriptor.
- */
-static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- struct dma_async_tx_descriptor *txd = &desc->async_tx;
-
- /* Run the link descriptor callback function */
- if (txd->callback) {
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p callback\n", desc);
-#endif
- txd->callback(txd->callback_param);
- }
-
- /* Run any dependencies */
- dma_run_dependencies(txd);
-
- dma_descriptor_unmap(txd);
-#ifdef FSL_DMA_LD_DEBUG
- chan_dbg(chan, "LD %p free\n", desc);
-#endif
- dma_pool_free(chan->desc_pool, desc, txd->phys);
-}
-
-/**
- * fsl_chan_xfer_ld_queue - transfer any pending transactions
- * @chan : Freescale DMA channel
- *
- * HARDWARE STATE: idle
- * LOCKING: must hold chan->desc_lock
- */
-static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc;
-
- /*
- * If the list of pending descriptors is empty, then we
- * don't need to do any work at all
- */
- if (list_empty(&chan->ld_pending)) {
- chan_dbg(chan, "no pending LDs\n");
- return;
- }
-
- /*
- * The DMA controller is not idle, which means that the interrupt
- * handler will start any queued transactions when it runs after
- * this transaction finishes
- */
- if (!chan->idle) {
- chan_dbg(chan, "DMA controller still busy\n");
- return;
- }
-
- /*
- * If there are some link descriptors which have not been
- * transferred, we need to start the controller
- */
-
- /*
- * Move all elements from the queue of pending transactions
- * onto the list of running transactions
- */
- chan_dbg(chan, "idle, starting controller\n");
- desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
- list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
-
- /*
- * The 85xx DMA controller doesn't clear the channel start bit
- * automatically at the end of a transfer. Therefore we must clear
- * it in software before starting the transfer.
- */
- if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- u32 mode;
-
- mode = DMA_IN(chan, &chan->regs->mr, 32);
- mode &= ~FSL_DMA_MR_CS;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
- }
-
- /*
- * Program the descriptor's address into the DMA controller,
- * then start the DMA transaction
- */
- set_cdar(chan, desc->async_tx.phys);
- get_cdar(chan);
-
- dma_start(chan);
- chan->idle = false;
-}
-
-/**
* fsl_dma_memcpy_issue_pending - Issue the DMA start command
* @chan : Freescale DMA channel
*/
@@ -1304,12 +1286,10 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
- dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
dma_cap_set(DMA_SG, fdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
- fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
fdev->common.device_tx_status = fsl_tx_status;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 19041cefabb1..14867e3ac8ff 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -255,6 +255,7 @@ struct sdma_channel {
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
unsigned int num_bd;
+ unsigned int period_len;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
@@ -593,6 +594,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+}
+
+static void sdma_update_channel_loop(struct sdma_channel *sdmac)
+{
struct sdma_buffer_descriptor *bd;
/*
@@ -607,15 +614,10 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
if (bd->mode.status & BD_RROR)
sdmac->status = DMA_ERROR;
- else
- sdmac->status = DMA_IN_PROGRESS;
bd->mode.status |= BD_DONE;
sdmac->buf_tail++;
sdmac->buf_tail %= sdmac->num_bd;
-
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
}
}
@@ -671,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ sdma_update_channel_loop(sdmac);
+
tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
@@ -1131,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
sdmac->direction = direction;
@@ -1227,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ u32 residue;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+ else
+ residue = sdmac->chn_count - sdmac->chn_real_count;
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
- sdmac->chn_count - sdmac->chn_real_count);
+ residue);
return sdmac->status;
}
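
Worked example for the cyclic case: with num_bd == 4 descriptors of
period_len == 4096 bytes each and buf_tail == 1 completed period, the
reported residue is (4 - 1) * 4096 == 12288 bytes; one-shot transfers keep
the old chn_count - chn_real_count arithmetic. The rule, lifted out for
illustration:

    #include <linux/types.h>

    static u32 example_sdma_residue(bool cyclic, u32 num_bd, u32 buf_tail,
                                    u32 period_len, u32 chn_count,
                                    u32 chn_real_count)
    {
            if (cyclic)
                    return (num_bd - buf_tail) * period_len;
            return chn_count - chn_real_count;
    }
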
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index bf02e7beb51a..a7b186d536b3 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -29,8 +29,8 @@
#define DALGN 0x00a0
#define DINT 0x00f0
#define DDADR 0x0200
-#define DSADR 0x0204
-#define DTADR 0x0208
+#define DSADR(n) (0x0204 + ((n) << 4))
+#define DTADR(n) (0x0208 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -277,7 +277,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
return;
/* clear the channel mapping in DRCMR */
- reg = DRCMR(pchan->phy->vchan->drcmr);
+ reg = DRCMR(pchan->drcmr);
writel(0, pchan->phy->base + reg);
spin_lock_irqsave(&pdev->phy_lock, flags);
@@ -748,11 +748,92 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
return 0;
}
+static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct mmp_pdma_desc_sw *sw;
+ u32 curr, residue = 0;
+ bool passed = false;
+ bool cyclic = chan->cyclic_first != NULL;
+
+ /*
+ * If the channel does not have a phy pointer anymore, it has already
+ * been completed. Therefore, its residue is 0.
+ */
+ if (!chan->phy)
+ return 0;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ else
+ curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+
+ list_for_each_entry(sw, &chan->chain_running, node) {
+ u32 start, end, len;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ start = sw->desc.dtadr;
+ else
+ start = sw->desc.dsadr;
+
+ len = sw->desc.dcmd & DCMD_LENGTH;
+ end = start + len;
+
+ /*
+ * 'passed' will be latched once we found the descriptor which
+ * lies inside the boundaries of the curr pointer. All
+ * descriptors that occur in the list _after_ we found that
+ * partially handled descriptor are still to be processed and
+ * are hence added to the residual bytes counter.
+ */
+
+ if (passed) {
+ residue += len;
+ } else if (curr >= start && curr <= end) {
+ residue += end - curr;
+ passed = true;
+ }
+
+ /*
+ * Descriptors that have the ENDIRQEN bit set mark the end of a
+ * transaction chain, and the cookie assigned with it has been
+ * returned previously from mmp_pdma_tx_submit().
+ *
+ * In case we have multiple transactions in the running chain,
+ * and the cookie does not match the one the user asked us
+ * about, reset the state variables and start over.
+ *
+ * This logic does not apply to cyclic transactions, where all
+ * descriptors have the ENDIRQEN bit set, and for which we
+ * can't have multiple transactions on one channel anyway.
+ */
+ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
+ continue;
+
+ if (sw->async_tx.cookie == cookie) {
+ return residue;
+ } else {
+ residue = 0;
+ passed = false;
+ }
+ }
+
+ /* We should only get here in case of cyclic transactions */
+ return residue;
+}
+
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- return dma_cookie_status(dchan, cookie, txstate);
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (likely(ret != DMA_ERROR))
+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
+
+ return ret;
}
/**
@@ -858,8 +939,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
struct mmp_pdma_chan *chan;
int ret;
- chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
- GFP_KERNEL);
+ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
if (chan == NULL)
return -ENOMEM;
@@ -946,8 +1026,7 @@ static int mmp_pdma_probe(struct platform_device *op)
irq_num++;
}
- pdev->phy = devm_kcalloc(pdev->dev,
- dma_channels, sizeof(struct mmp_pdma_chan),
+ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
GFP_KERNEL);
if (pdev->phy == NULL)
return -ENOMEM;
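
With mmp_pdma_tx_status() now filling in a residue, dmaengine consumers
can track transfer progress through the standard API. A minimal
consumer-side sketch (channel and cookie setup elided):

    #include <linux/dmaengine.h>
    #include <linux/printk.h>

    static void example_report_progress(struct dma_chan *chan,
                                        dma_cookie_t cookie)
    {
            struct dma_tx_state state;
            enum dma_status status;

            status = dmaengine_tx_status(chan, cookie, &state);
            if (status == DMA_COMPLETE)
                    pr_info("transfer done\n");
            else if (status != DMA_ERROR)
                    pr_info("%u bytes still pending\n", state.residue);
    }
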
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 448750da4402..2ad43738ac8b 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -2,6 +2,7 @@
* Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
* Copyright (C) Semihalf 2009
* Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -29,8 +30,18 @@
*/
/*
- * This is initial version of MPC5121 DMA driver. Only memory to memory
- * transfers are supported (tested using dmatest module).
+ * MPC512x and MPC8308 DMA driver. It supports
+ * memory to memory data transfers (tested using dmatest module) and
+ * data transfers between memory and peripheral I/O memory
+ * by means of slave scatter/gather with these limitations:
+ * - chunked transfers (described by s/g lists with more than one item)
+ * are refused as long as proper support for scatter/gather is missing;
+ * - transfers on MPC8308 always start from software as this SoC appears
+ * not to have external request lines for peripheral flow control;
+ * - only peripheral devices with 4-byte FIFO access register are supported;
+ * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
+ * source and destination addresses must be 4-byte aligned
+ * and transfer size must be aligned on (4 * maxburst) boundary;
*/
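Before handing buffers to this driver, a client has to honour the constraints listed above. A short sketch of the corresponding sanity check, assuming 'addr', 'len' and 'maxburst' come from the caller's slave configuration (illustrative only):

	/* Sketch: the driver will refuse transfers violating these rules */
	if (!IS_ALIGNED(addr, 4) || !IS_ALIGNED(len, 4 * maxburst))
		return -EINVAL;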
#include <linux/module.h>
@@ -52,9 +63,17 @@
#define MPC_DMA_DESCRIPTORS 64
/* Macro definitions */
-#define MPC_DMA_CHANNELS 64
#define MPC_DMA_TCD_OFFSET 0x1000
+/*
+ * Maximum channel counts for individual hardware variants
+ * and the maximum channel count over all supported controllers,
+ * used for data structure size
+ */
+#define MPC8308_DMACHAN_MAX 16
+#define MPC512x_DMACHAN_MAX 64
+#define MPC_DMA_CHANNELS 64
+
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG (1 << 31)
#define MPC_DMA_DMACR_ERGA (1 << 3)
@@ -181,6 +200,7 @@ struct mpc_dma_desc {
dma_addr_t tcd_paddr;
int error;
struct list_head node;
+ int will_access_peripheral;
};
struct mpc_dma_chan {
@@ -193,6 +213,12 @@ struct mpc_dma_chan {
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
+ /* Settings for access to peripheral FIFO */
+ dma_addr_t src_per_paddr;
+ u32 src_tcd_nunits;
+ dma_addr_t dst_per_paddr;
+ u32 dst_tcd_nunits;
+
/* Lock for this structure */
spinlock_t lock;
};
@@ -243,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
struct mpc_dma_desc *mdesc;
int cid = mchan->chan.chan_id;
- /* Move all queued descriptors to active list */
- list_splice_tail_init(&mchan->queued, &mchan->active);
+ while (!list_empty(&mchan->queued)) {
+ mdesc = list_first_entry(&mchan->queued,
+ struct mpc_dma_desc, node);
+ /*
+ * Grab either several mem-to-mem transfer descriptors
+ * or one peripheral transfer descriptor;
+ * don't mix mem-to-mem and peripheral transfer descriptors
+ * within the same 'active' list.
+ */
+ if (mdesc->will_access_peripheral) {
+ if (list_empty(&mchan->active))
+ list_move_tail(&mdesc->node, &mchan->active);
+ break;
+ } else {
+ list_move_tail(&mdesc->node, &mchan->active);
+ }
+ }
/* Chain descriptors into one transaction */
list_for_each_entry(mdesc, &mchan->active, node) {
@@ -270,7 +311,17 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
if (first != prev)
mdma->tcd[cid].e_sg = 1;
- out_8(&mdma->regs->dmassrt, cid);
+
+ if (mdma->is_mpc8308) {
+ /* MPC8308, no request lines, software initiated start */
+ out_8(&mdma->regs->dmassrt, cid);
+ } else if (first->will_access_peripheral) {
+ /* Peripherals involved, start by external request signal */
+ out_8(&mdma->regs->dmaserq, cid);
+ } else {
+ /* Memory to memory transfer, software initiated start */
+ out_8(&mdma->regs->dmassrt, cid);
+ }
}
/* Handle interrupt on one half of DMA controller (32 channels) */
@@ -588,6 +639,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
}
mdesc->error = 0;
+ mdesc->will_access_peripheral = 0;
tcd = mdesc->tcd;
/* Prepare Transfer Control Descriptor for this transaction */
@@ -635,6 +687,193 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
return &mdesc->desc;
}
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc = NULL;
+ dma_addr_t per_paddr;
+ u32 tcd_nunits;
+ struct mpc_dma_tcd *tcd;
+ unsigned long iflags;
+ struct scatterlist *sg;
+ size_t len;
+ int iter, i;
+
+ /* Currently there is no proper support for scatter/gather */
+ if (sg_len != 1)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ spin_lock_irqsave(&mchan->lock, iflags);
+
+ mdesc = list_first_entry_or_null(&mchan->free,
+ struct mpc_dma_desc, node);
+ if (!mdesc) {
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+ /* Try to free completed descriptors */
+ mpc_dma_process_completed(mdma);
+ return NULL;
+ }
+
+ list_del(&mdesc->node);
+
+ if (direction == DMA_DEV_TO_MEM) {
+ per_paddr = mchan->src_per_paddr;
+ tcd_nunits = mchan->src_tcd_nunits;
+ } else {
+ per_paddr = mchan->dst_per_paddr;
+ tcd_nunits = mchan->dst_tcd_nunits;
+ }
+
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ if (per_paddr == 0 || tcd_nunits == 0)
+ goto err_prep;
+
+ mdesc->error = 0;
+ mdesc->will_access_peripheral = 1;
+
+ /* Prepare Transfer Control Descriptor for this transaction */
+ tcd = mdesc->tcd;
+
+ memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+ if (!IS_ALIGNED(sg_dma_address(sg), 4))
+ goto err_prep;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ tcd->saddr = per_paddr;
+ tcd->daddr = sg_dma_address(sg);
+ tcd->soff = 0;
+ tcd->doff = 4;
+ } else {
+ tcd->saddr = sg_dma_address(sg);
+ tcd->daddr = per_paddr;
+ tcd->soff = 4;
+ tcd->doff = 0;
+ }
+
+ tcd->ssize = MPC_DMA_TSIZE_4;
+ tcd->dsize = MPC_DMA_TSIZE_4;
+
+ len = sg_dma_len(sg);
+ tcd->nbytes = tcd_nunits * 4;
+ if (!IS_ALIGNED(len, tcd->nbytes))
+ goto err_prep;
+
+ iter = len / tcd->nbytes;
+ if (iter >= 1 << 15) {
+ /* len is too big */
+ goto err_prep;
+ }
+ /* biter_linkch and citer_linkch hold the high bits of iter */
+ tcd->biter = iter & 0x1ff;
+ tcd->biter_linkch = iter >> 9;
+ tcd->citer = tcd->biter;
+ tcd->citer_linkch = tcd->biter_linkch;
+
+ tcd->e_sg = 0;
+ tcd->d_req = 1;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+ }
+
+ return &mdesc->desc;
+
+err_prep:
+ /* Put the descriptor back */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ return NULL;
+}
+
+static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma *mdma;
+ struct dma_slave_config *cfg;
+ unsigned long flags;
+
+ mchan = dma_chan_to_mpc_dma_chan(chan);
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ /* Disable channel requests */
+ mdma = dma_chan_to_mpc_dma(chan);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ out_8(&mdma->regs->dmacerq, chan->chan_id);
+ list_splice_tail_init(&mchan->prepared, &mchan->free);
+ list_splice_tail_init(&mchan->queued, &mchan->free);
+ list_splice_tail_init(&mchan->active, &mchan->free);
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return 0;
+
+ case DMA_SLAVE_CONFIG:
+ /*
+ * Software constraints:
+ * - only transfers between a peripheral device and
+ * memory are supported;
+ * - only peripheral devices with 4-byte FIFO access register
+ * are supported;
+ * - minimal transfer chunk is 4 bytes and consequently
+ * source and destination addresses must be 4-byte aligned
+ * and transfer size must be aligned on (4 * maxburst)
+ * boundary;
+ * - during the transfer RAM address is being incremented by
+ * the size of minimal transfer chunk;
+ * - peripheral port's address is constant during the transfer.
+ */
+
+ cfg = (void *)arg;
+
+ if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+ cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+ !IS_ALIGNED(cfg->src_addr, 4) ||
+ !IS_ALIGNED(cfg->dst_addr, 4)) {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ mchan->src_per_paddr = cfg->src_addr;
+ mchan->src_tcd_nunits = cfg->src_maxburst;
+ mchan->dst_per_paddr = cfg->dst_addr;
+ mchan->dst_tcd_nunits = cfg->dst_maxburst;
+
+ /* Apply defaults */
+ if (mchan->src_tcd_nunits == 0)
+ mchan->src_tcd_nunits = 1;
+ if (mchan->dst_tcd_nunits == 0)
+ mchan->dst_tcd_nunits = 1;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return 0;
+
+ default:
+ /* Unknown command */
+ break;
+ }
+
+ return -ENXIO;
+}
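For reference, a peripheral driver reaches the DMA_SLAVE_CONFIG branch above through the generic dmaengine wrapper. A hedged sketch, where 'fifo_phys' stands in for the peripheral's 4-byte FIFO register address (an assumed, illustrative variable):

	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,	/* must be 4-byte aligned */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,		/* stored as src_tcd_nunits */
	};
	int ret = dmaengine_slave_config(chan, &cfg);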
+
static int mpc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -649,13 +888,15 @@ static int mpc_dma_probe(struct platform_device *op)
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
dev_err(dev, "Memory exhausted!\n");
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err;
}
mdma->irq = irq_of_parse_and_map(dn, 0);
if (mdma->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err;
}
if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
@@ -663,14 +904,15 @@ static int mpc_dma_probe(struct platform_device *op)
mdma->irq2 = irq_of_parse_and_map(dn, 1);
if (mdma->irq2 == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err_dispose1;
}
}
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
- return retval;
+ goto err_dispose2;
}
regs_start = res.start;
@@ -678,31 +920,34 @@ static int mpc_dma_probe(struct platform_device *op)
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
- return -EBUSY;
+ retval = -EBUSY;
+ goto err_dispose2;
}
mdma->regs = devm_ioremap(dev, regs_start, regs_size);
if (!mdma->regs) {
dev_err(dev, "Error mapping memory region!\n");
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_dispose2;
}
mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ MPC_DMA_TCD_OFFSET);
- retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
- mdma);
+ retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
if (retval) {
dev_err(dev, "Error requesting IRQ!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err_dispose2;
}
if (mdma->is_mpc8308) {
- retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
- DRV_NAME, mdma);
+ retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
+ DRV_NAME, mdma);
if (retval) {
dev_err(dev, "Error requesting IRQ2!\n");
- return -EINVAL;
+ retval = -EINVAL;
+ goto err_free1;
}
}
@@ -710,18 +955,21 @@ static int mpc_dma_probe(struct platform_device *op)
dma = &mdma->dma;
dma->dev = dev;
- if (!mdma->is_mpc8308)
- dma->chancnt = MPC_DMA_CHANNELS;
+ if (mdma->is_mpc8308)
+ dma->chancnt = MPC8308_DMACHAN_MAX;
else
- dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
+ dma->chancnt = MPC512x_DMACHAN_MAX;
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
dma->device_tx_status = mpc_dma_tx_status;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+ dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
+ dma->device_control = mpc_dma_device_control;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
for (i = 0; i < dma->chancnt; i++) {
mchan = &mdma->channels[i];
@@ -747,7 +995,19 @@ static int mpc_dma_probe(struct platform_device *op)
* - Round-robin group arbitration,
* - Round-robin channel arbitration.
*/
- if (!mdma->is_mpc8308) {
+ if (mdma->is_mpc8308) {
+ /* MPC8308 has 16 channels and lacks some registers */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+ /* enable snooping */
+ out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmaintl, 0xFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+ } else {
out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
@@ -768,29 +1028,28 @@ static int mpc_dma_probe(struct platform_device *op)
/* Route interrupts to IPIC */
out_be32(&mdma->regs->dmaihsa, 0);
out_be32(&mdma->regs->dmailsa, 0);
- } else {
- /* MPC8308 has 16 channels and lacks some registers */
- out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
-
- /* enable snooping */
- out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
- /* Disable error interrupts */
- out_be32(&mdma->regs->dmaeeil, 0);
-
- /* Clear interrupts status */
- out_be32(&mdma->regs->dmaintl, 0xFFFF);
- out_be32(&mdma->regs->dmaerrl, 0xFFFF);
}
/* Register DMA engine */
dev_set_drvdata(dev, mdma);
retval = dma_async_device_register(dma);
- if (retval) {
- devm_free_irq(dev, mdma->irq, mdma);
- irq_dispose_mapping(mdma->irq);
- }
+ if (retval)
+ goto err_free2;
return retval;
+
+err_free2:
+ if (mdma->is_mpc8308)
+ free_irq(mdma->irq2, mdma);
+err_free1:
+ free_irq(mdma->irq, mdma);
+err_dispose2:
+ if (mdma->is_mpc8308)
+ irq_dispose_mapping(mdma->irq2);
+err_dispose1:
+ irq_dispose_mapping(mdma->irq);
+err:
+ return retval;
}
static int mpc_dma_remove(struct platform_device *op)
@@ -799,7 +1058,11 @@ static int mpc_dma_remove(struct platform_device *op)
struct mpc_dma *mdma = dev_get_drvdata(dev);
dma_async_device_unregister(&mdma->dma);
- devm_free_irq(dev, mdma->irq, mdma);
+ if (mdma->is_mpc8308) {
+ free_irq(mdma->irq2, mdma);
+ irq_dispose_mapping(mdma->irq2);
+ }
+ free_irq(mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
return 0;
@@ -807,6 +1070,7 @@ static int mpc_dma_remove(struct platform_device *op)
static struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc5121-dma", },
+ { .compatible = "fsl,mpc8308-dma", },
{},
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 05fa548bd659..9f9ca9fe5ce6 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>
@@ -996,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
-DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
+const struct pci_device_id pch_dma_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index b209a0f17344..012520c9fd79 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -164,6 +164,7 @@ struct s3c24xx_sg {
* @disrcc: value for source control register
* @didstc: value for destination control register
* @dcon: base value for dcon register
+ * @cyclic: indicate cyclic transfer
*/
struct s3c24xx_txd {
struct virt_dma_desc vd;
@@ -173,6 +174,7 @@ struct s3c24xx_txd {
u32 disrcc;
u32 didstc;
u32 dcon;
+ bool cyclic;
};
struct s3c24xx_dma_chan;
@@ -669,8 +671,10 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
/* when more sg's are in this txd, start the next one */
if (!list_is_last(txd->at, &txd->dsg_list)) {
txd->at = txd->at->next;
+ if (txd->cyclic)
+ vchan_cyclic_callback(&txd->vd);
s3c24xx_dma_start_next_sg(s3cchan, txd);
- } else {
+ } else if (!txd->cyclic) {
s3cchan->at = NULL;
vchan_cookie_complete(&txd->vd);
@@ -682,6 +686,12 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
s3c24xx_dma_start_next_txd(s3cchan);
else
s3c24xx_dma_phy_free(s3cchan);
+ } else {
+ vchan_cyclic_callback(&txd->vd);
+
+ /* Cyclic: restart from the beginning of the sg list */
+ txd->at = txd->dsg_list.next;
+ s3c24xx_dma_start_next_sg(s3cchan, txd);
}
}
spin_unlock(&s3cchan->vc.lock);
@@ -877,6 +887,104 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}
+static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+ struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
+ struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
+ struct s3c24xx_txd *txd;
+ struct s3c24xx_sg *dsg;
+ unsigned sg_len;
+ dma_addr_t slave_addr;
+ u32 hwcfg = 0;
+ int i;
+
+ dev_dbg(&s3cdma->pdev->dev,
+ "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
+ size, period, s3cchan->name);
+
+ if (!is_slave_direction(direction)) {
+ dev_err(&s3cdma->pdev->dev,
+ "direction %d unsupported\n", direction);
+ return NULL;
+ }
+
+ txd = s3c24xx_dma_get_txd();
+ if (!txd)
+ return NULL;
+
+ txd->cyclic = 1;
+
+ if (cdata->handshake)
+ txd->dcon |= S3C24XX_DCON_HANDSHAKE;
+
+ switch (cdata->bus) {
+ case S3C24XX_DMA_APB:
+ txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_APB;
+ break;
+ case S3C24XX_DMA_AHB:
+ txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
+ hwcfg |= S3C24XX_DISRCC_LOC_AHB;
+ break;
+ }
+
+ /*
+ * Always assume our peripheral destination is a fixed
+ * address in memory.
+ */
+ hwcfg |= S3C24XX_DISRCC_INC_FIXED;
+
+ /*
+ * Individual dma operations are requested by the slave,
+ * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
+ */
+ txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
+ S3C24XX_DISRCC_INC_INCREMENT;
+ txd->didstc = hwcfg;
+ slave_addr = s3cchan->cfg.dst_addr;
+ txd->width = s3cchan->cfg.dst_addr_width;
+ } else {
+ txd->disrcc = hwcfg;
+ txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
+ S3C24XX_DIDSTC_INC_INCREMENT;
+ slave_addr = s3cchan->cfg.src_addr;
+ txd->width = s3cchan->cfg.src_addr_width;
+ }
+
+ sg_len = size / period;
+
+ for (i = 0; i < sg_len; i++) {
+ dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
+ if (!dsg) {
+ s3c24xx_dma_free_txd(txd);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = period;
+ /* Check last period length */
+ if (i == sg_len - 1)
+ dsg->len = size - period * i;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = addr + period * i;
+ dsg->dst_addr = slave_addr;
+ } else { /* DMA_DEV_TO_MEM */
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = addr + period * i;
+ }
+ }
+
+ return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
+}
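A client drives this path through the standard cyclic prep helper. A sketch of a ring-buffer setup, assuming 'buf' is a DMA-mapped address of 'len' bytes split into 'period'-sized chunks and 'period_done' is the client's per-period callback (all names illustrative):

	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, len, period,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = period_done;	/* fires once per period */
		desc->callback_param = ctx;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}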
+
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -961,7 +1069,6 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
dsg->src_addr = slave_addr;
dsg->dst_addr = sg_dma_address(sg);
}
- break;
}
return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
@@ -1198,6 +1305,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
/* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
s3cdma->slave.dev = &pdev->dev;
s3cdma->slave.device_alloc_chan_resources =
@@ -1207,6 +1315,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
+ s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_control = s3c24xx_dma_control;
/* Register as many memcpy channels as there are physical channels */
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index b4c813831006..0f719816c91b 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -4,7 +4,7 @@
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+ depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST
depends on !SH_DMA_API
default y
select DMA_ENGINE
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 3083d901a414..b212d9471ab5 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -18,6 +18,7 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 52396771acbe..b35007e21e6b 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -73,8 +73,7 @@ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct shdma_desc *chunk, *c, *desc =
- container_of(tx, struct shdma_desc, async_tx),
- *last = desc;
+ container_of(tx, struct shdma_desc, async_tx);
struct shdma_chan *schan = to_shdma_chan(tx->chan);
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
@@ -98,19 +97,20 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
&chunk->node == &schan->ld_free))
break;
chunk->mark = DESC_SUBMITTED;
- /* Callback goes to the last chunk */
- chunk->async_tx.callback = NULL;
+ if (chunk->chunks == 1) {
+ chunk->async_tx.callback = callback;
+ chunk->async_tx.callback_param = tx->callback_param;
+ } else {
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ }
chunk->cookie = cookie;
list_move_tail(&chunk->node, &schan->ld_queue);
- last = chunk;
dev_dbg(schan->dev, "submit #%d@%p on %d\n",
- tx->cookie, &last->async_tx, schan->id);
+ tx->cookie, &chunk->async_tx, schan->id);
}
- last->async_tx.callback = callback;
- last->async_tx.callback_param = tx->callback_param;
-
if (power_up) {
int ret;
schan->pm_state = SHDMA_PM_BUSY;
@@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
dma_async_tx_callback callback = NULL;
void *param = NULL;
unsigned long flags;
+ LIST_HEAD(cyclic_list);
spin_lock_irqsave(&schan->chan_lock, flags);
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
@@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
if (((desc->mark == DESC_COMPLETED ||
desc->mark == DESC_WAITING) &&
async_tx_test_ack(&desc->async_tx)) || all) {
- /* Remove from ld_queue list */
- desc->mark = DESC_IDLE;
- list_move(&desc->node, &schan->ld_free);
+ if (all || !desc->cyclic) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+ list_move(&desc->node, &schan->ld_free);
+ } else {
+ /* reuse as cyclic */
+ desc->mark = DESC_SUBMITTED;
+ list_move_tail(&desc->node, &cyclic_list);
+ }
if (list_empty(&schan->ld_queue)) {
dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
@@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
*/
schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
+ list_splice_tail(&cyclic_list, &schan->ld_queue);
+
spin_unlock_irqrestore(&schan->chan_lock, flags);
if (callback)
@@ -521,7 +530,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
*/
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
struct scatterlist *sg;
struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
@@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
if (!new)
goto err_get_desc;
- new->chunks = chunks--;
+ new->cyclic = cyclic;
+ if (cyclic)
+ new->chunks = 1;
+ else
+ new->chunks = chunks--;
list_add_tail(&new->node, &tx_list);
} while (len);
}
@@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+ return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+ flags, false);
}
static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
@@ -640,7 +654,58 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
slave_addr = ops->slave_addr(schan);
return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
- direction, flags);
+ direction, flags, false);
+}
+
+#define SHDMA_MAX_SG_LEN 32
+
+static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ unsigned int sg_len = buf_len / period_len;
+ int slave_id = schan->slave_id;
+ dma_addr_t slave_addr;
+ struct scatterlist sgl[SHDMA_MAX_SG_LEN];
+ int i;
+
+ if (!chan)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ if (sg_len > SHDMA_MAX_SG_LEN) {
+ dev_err(schan->dev, "sg length %d exceds limit %d",
+ sg_len, SHDMA_MAX_SG_LEN);
+ return NULL;
+ }
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || (buf_len < period_len)) {
+ dev_warn(schan->dev,
+ "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+ __func__, buf_len, period_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ sg_init_table(sgl, sg_len);
+ for (i = 0; i < sg_len; i++) {
+ dma_addr_t src = buf_addr + (period_len * i);
+
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+ offset_in_page(src));
+ sg_dma_address(&sgl[i]) = src;
+ sg_dma_len(&sgl[i]) = period_len;
+ }
+
+ return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags, true);
}
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -915,6 +980,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
/* Compulsory for DMA_SLAVE fields */
dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
dma_dev->device_control = shdma_control;
dma_dev->dev = dev;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index dda7e7563f5d..146d5df926db 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -18,21 +18,22 @@
*
*/
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/rculist.h>
#include <linux/sh_dma.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/rculist.h>
#include "../dmaengine.h"
#include "shdma.h"
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 4e7df43b50d6..3ce103909896 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -14,12 +14,13 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/sudmac.h>
struct sudmac_chan {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bf18c786ed40..c7984459ede7 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -556,7 +556,6 @@ struct d40_gen_dmac {
* later
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
- * @initialized: true if the dma has been initialized
* @gen_dmac: the struct for generic registers values to represent u8500/8540
* DMA controller
*/
@@ -594,7 +593,6 @@ struct d40_base {
u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
- bool initialized;
struct d40_gen_dmac gen_dmac;
};
@@ -1056,62 +1054,6 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-
-#ifdef CONFIG_PM
-static void dma40_backup(void __iomem *baseaddr, u32 *backup,
- u32 *regaddr, int num, bool save)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- void __iomem *addr = baseaddr + regaddr[i];
-
- if (save)
- backup[i] = readl_relaxed(addr);
- else
- writel_relaxed(backup[i], addr);
- }
-}
-
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
- int i;
-
- /* Save/Restore channel specific registers */
- for (i = 0; i < base->num_phy_chans; i++) {
- void __iomem *addr;
- int idx;
-
- if (base->phy_res[i].reserved)
- continue;
-
- addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
- idx = i * ARRAY_SIZE(d40_backup_regs_chan);
-
- dma40_backup(addr, &base->reg_val_backup_chan[idx],
- d40_backup_regs_chan,
- ARRAY_SIZE(d40_backup_regs_chan),
- save);
- }
-
- /* Save/Restore global registers */
- dma40_backup(base->virtbase, base->reg_val_backup,
- d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
- save);
-
- /* Save/Restore registers only existing on dma40 v3 and later */
- if (base->gen_dmac.backup)
- dma40_backup(base->virtbase, base->reg_val_backup_v4,
- base->gen_dmac.backup,
- base->gen_dmac.backup_size,
- save);
-}
-#else
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
-}
-#endif
-
static int __d40_execute_command_phy(struct d40_chan *d40c,
enum d40_command command)
{
@@ -1495,8 +1437,8 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
- pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -2998,18 +2940,88 @@ failure1:
}
/* Suspend resume functionality */
-#ifdef CONFIG_PM
-static int dma40_pm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int dma40_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
- int ret = 0;
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator);
return ret;
}
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator) {
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->gen_dmac.backup)
+ dma40_backup(base->virtbase, base->reg_val_backup_v4,
+ base->gen_dmac.backup,
+ base->gen_dmac.backup_size,
+ save);
+}
+
static int dma40_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -3030,36 +3042,20 @@ static int dma40_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
- if (base->initialized)
- d40_save_restore_registers(base, false);
+ d40_save_restore_registers(base, false);
writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
base->virtbase + D40_DREG_GCC);
return 0;
}
-
-static int dma40_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
- int ret = 0;
-
- if (base->lcpa_regulator)
- ret = regulator_enable(base->lcpa_regulator);
-
- return ret;
-}
+#endif
static const struct dev_pm_ops dma40_pm_ops = {
- .suspend = dma40_pm_suspend,
- .runtime_suspend = dma40_runtime_suspend,
- .runtime_resume = dma40_runtime_resume,
- .resume = dma40_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
+ SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend,
+ dma40_runtime_resume,
+ NULL)
};
-#define DMA40_PM_OPS (&dma40_pm_ops)
-#else
-#define DMA40_PM_OPS NULL
-#endif
/* Initialization functions. */
@@ -3645,12 +3641,6 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
- pm_runtime_irq_safe(base->dev);
- pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(base->dev);
- pm_runtime_enable(base->dev);
- pm_runtime_resume(base->dev);
-
if (base->plat_data->use_esram_lcla) {
base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
@@ -3671,7 +3661,15 @@ static int __init d40_probe(struct platform_device *pdev)
}
}
- base->initialized = true;
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_mark_last_busy(base->dev);
+ pm_runtime_set_active(base->dev);
+ pm_runtime_enable(base->dev);
+
ret = d40_dmaengine_init(base, num_reserved_chans);
if (ret)
goto failure;
@@ -3754,7 +3752,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
- .pm = DMA40_PM_OPS,
+ .pm = &dma40_pm_ops,
.of_match_table = d40_match,
},
};
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
new file mode 100644
index 000000000000..3c4e9f2fea28
--- /dev/null
+++ b/drivers/dma/xilinx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
new file mode 100644
index 000000000000..42a13e8d4607
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -0,0 +1,1379 @@
+/*
+ * DMA driver for Xilinx Video DMA Engine
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
+ * core that provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream type video target peripherals. The core provides efficient
+ * two dimensional DMA operations with independent asynchronous read (S2MM)
+ * and write (MM2S) channel operation. It can be configured to have either
+ * one channel or two channels. If configured as two channels, one is to
+ * transmit to the video device (MM2S) and another is to receive from the
+ * video device (S2MM). Initialization, status, interrupt and management
+ * registers are accessed through an AXI4-Lite slave interface.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/amba/xilinx_dma.h>
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030
+#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
+#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
+
+/* Control Registers */
+#define XILINX_VDMA_REG_DMACR 0x0000
+#define XILINX_VDMA_DMACR_DELAY_MAX 0xff
+#define XILINX_VDMA_DMACR_DELAY_SHIFT 24
+#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff
+#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16
+#define XILINX_VDMA_DMACR_ERR_IRQ BIT(14)
+#define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13)
+#define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12)
+#define XILINX_VDMA_DMACR_MASTER_SHIFT 8
+#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5
+#define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4)
+#define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3)
+#define XILINX_VDMA_DMACR_RESET BIT(2)
+#define XILINX_VDMA_DMACR_CIRC_EN BIT(1)
+#define XILINX_VDMA_DMACR_RUNSTOP BIT(0)
+#define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+
+#define XILINX_VDMA_REG_DMASR 0x0004
+#define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15)
+#define XILINX_VDMA_DMASR_ERR_IRQ BIT(14)
+#define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13)
+#define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12)
+#define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11)
+#define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10)
+#define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9)
+#define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8)
+#define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7)
+#define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6)
+#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5)
+#define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_VDMA_DMASR_IDLE BIT(1)
+#define XILINX_VDMA_DMASR_HALTED BIT(0)
+#define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
+
+#define XILINX_VDMA_REG_CURDESC 0x0008
+#define XILINX_VDMA_REG_TAILDESC 0x0010
+#define XILINX_VDMA_REG_REG_INDEX 0x0014
+#define XILINX_VDMA_REG_FRMSTORE 0x0018
+#define XILINX_VDMA_REG_THRESHOLD 0x001c
+#define XILINX_VDMA_REG_FRMPTR_STS 0x0024
+#define XILINX_VDMA_REG_PARK_PTR 0x0028
+#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_VDMA_REG_VDMA_VERSION 0x002c
+
+/* Register Direct Mode Registers */
+#define XILINX_VDMA_REG_VSIZE 0x0000
+#define XILINX_VDMA_REG_HSIZE 0x0004
+
+#define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008
+#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
+#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
+
+#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
+
+/* HW specific definitions */
+#define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2
+
+#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \
+ (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
+ XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
+ XILINX_VDMA_DMASR_ERR_IRQ)
+
+#define XILINX_VDMA_DMASR_ALL_ERR_MASK \
+ (XILINX_VDMA_DMASR_EOL_LATE_ERR | \
+ XILINX_VDMA_DMASR_SOF_LATE_ERR | \
+ XILINX_VDMA_DMASR_SG_DEC_ERR | \
+ XILINX_VDMA_DMASR_SG_SLV_ERR | \
+ XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_DMA_DEC_ERR | \
+ XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
+ XILINX_VDMA_DMASR_DMA_INT_ERR)
+
+/*
+ * Recoverable errors are DMA Internal error, SOF Early, EOF Early
+ * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
+ * is enabled in the h/w system.
+ */
+#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \
+ (XILINX_VDMA_DMASR_SOF_LATE_ERR | \
+ XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_VDMA_DMASR_DMA_INT_ERR)
+
+/* Axi VDMA Flush on Fsync bits */
+#define XILINX_VDMA_FLUSH_S2MM 3
+#define XILINX_VDMA_FLUSH_MM2S 2
+#define XILINX_VDMA_FLUSH_BOTH 1
+
+/* Delay loop counter to prevent hardware failure */
+#define XILINX_VDMA_LOOP_COUNT 1000000
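The DMACR delay and frame-count fields above are meant for read-modify-write updates. A minimal sketch of packing new values, built only from the shifts and maxima defined here; 'frm_cnt' and 'delay' are assumed caller-supplied values, and vdma_ctrl_read()/vdma_ctrl_write() are the accessors defined further down in this file:

	u32 reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	/* Clear both fields, then insert the new values */
	reg &= ~((XILINX_VDMA_DMACR_FRAME_COUNT_MAX <<
		  XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT) |
		 (XILINX_VDMA_DMACR_DELAY_MAX <<
		  XILINX_VDMA_DMACR_DELAY_SHIFT));
	reg |= (frm_cnt << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT) |
	       (delay << XILINX_VDMA_DMACR_DELAY_SHIFT);
	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);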
+
+/**
+ * struct xilinx_vdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @pad2: Reserved @0x0C
+ * @vsize: Vertical Size @0x10
+ * @hsize: Horizontal Size @0x14
+ * @stride: Number of bytes between the first
+ * pixels of each horizontal line @0x18
+ */
+struct xilinx_vdma_desc_hw {
+ u32 next_desc;
+ u32 pad1;
+ u32 buf_addr;
+ u32 pad2;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_vdma_tx_segment {
+ struct xilinx_vdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @segments: TX segments list
+ * @node: Node in the channel descriptors list
+ */
+struct xilinx_vdma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head segments;
+ struct list_head node;
+};
+
+/**
+ * struct xilinx_vdma_chan - Driver specific VDMA channel structure
+ * @xdev: Driver specific device structure
+ * @ctrl_offset: Control registers offset
+ * @desc_offset: TX descriptor registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @active_desc: Active descriptor
+ * @allocated_desc: Allocated descriptor
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool: Descriptors pool
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @id: Channel ID
+ * @direction: Transfer direction
+ * @num_frms: Number of frames
+ * @has_sg: Support scatter transfers
+ * @genlock: Support genlock mode
+ * @err: Channel has errors
+ * @tasklet: Cleanup work after irq
+ * @config: Device configuration info
+ * @flush_on_fsync: Flush on Frame sync
+ */
+struct xilinx_vdma_chan {
+ struct xilinx_vdma_device *xdev;
+ u32 ctrl_offset;
+ u32 desc_offset;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct xilinx_vdma_tx_descriptor *active_desc;
+ struct xilinx_vdma_tx_descriptor *allocated_desc;
+ struct list_head done_list;
+ struct dma_chan common;
+ struct dma_pool *desc_pool;
+ struct device *dev;
+ int irq;
+ int id;
+ enum dma_transfer_direction direction;
+ int num_frms;
+ bool has_sg;
+ bool genlock;
+ bool err;
+ struct tasklet_struct tasklet;
+ struct xilinx_vdma_config config;
+ bool flush_on_fsync;
+};
+
+/**
+ * struct xilinx_vdma_device - VDMA device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific VDMA channel
+ * @has_sg: Specifies whether Scatter-Gather is present or not
+ * @flush_on_fsync: Flush on frame sync
+ */
+struct xilinx_vdma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
+ bool has_sg;
+ u32 flush_on_fsync;
+};
+
+/* Macros */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_vdma_chan, common)
+#define to_vdma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+
+/* IO accessors */
+static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
+{
+ return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
+{
+ iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 value)
+{
+ vdma_write(chan, chan->desc_offset + reg, value);
+}
+
+static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
+{
+ return vdma_read(chan, chan->ctrl_offset + reg);
+}
+
+static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 value)
+{
+ vdma_write(chan, chan->ctrl_offset + reg, value);
+}
+
+static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 clr)
+{
+ vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
+}
+
+static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
+ u32 set)
+{
+ vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors and segments alloc and free
+ */
+
+/**
+ * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_segment *
+xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ memset(segment, 0, sizeof(*segment));
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific VDMA channel
+ * @segment: VDMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_descriptor *
+xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_descriptor *desc;
+ unsigned long flags;
+
+ if (chan->allocated_desc)
+ return chan->allocated_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->allocated_desc = desc;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ INIT_LIST_HEAD(&desc->segments);
+
+ return desc;
+}
+
+/**
+ * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific VDMA channel
+ * @desc: VDMA transaction descriptor
+ */
+static void
+xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *segment, *next;
+
+ if (!desc)
+ return;
+
+ list_for_each_entry_safe(segment, next, &desc->segments, node) {
+ list_del(&segment->node);
+ xilinx_vdma_free_tx_segment(chan, segment);
+ }
+
+ kfree(desc);
+}
+
+/* Required functions */
+
+/**
+ * xilinx_vdma_free_desc_list - Free descriptors list
+ * @chan: Driver specific VDMA channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_vdma_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ xilinx_vdma_free_tx_descriptor(chan, desc);
+ }
+}
+
+/**
+ * xilinx_vdma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_vdma_free_desc_list(chan, &chan->pending_list);
+ xilinx_vdma_free_desc_list(chan, &chan->done_list);
+
+ xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
+ chan->active_desc = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+
+ xilinx_vdma_free_descriptors(chan);
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ /* Remove from the list of running transactions */
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ xilinx_vdma_free_tx_descriptor(chan, desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx VDMA channel structure
+ */
+static void xilinx_vdma_do_tasklet(unsigned long data)
+{
+ struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;
+
+ xilinx_vdma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_vdma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ /* Has this channel already been allocated? */
+ if (chan->desc_pool)
+ return 0;
+
+ /*
+ * We need the descriptor to be aligned to 64 bytes, as
+ * required by the Xilinx VDMA specification.
+ */
+ chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_vdma_tx_segment),
+ __alignof__(struct xilinx_vdma_tx_segment), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ dma_cookie_init(dchan);
+ return 0;
+}
+
+/**
+ * xilinx_vdma_tx_status - Get VDMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xilinx_vdma_is_running - Check if VDMA channel is running
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: '1' if running, '0' if not.
+ */
+static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
+{
+ return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_HALTED) &&
+ (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
+ XILINX_VDMA_DMACR_RUNSTOP);
+}
+
+/**
+ * xilinx_vdma_is_idle - Check if VDMA channel is idle
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: '1' if idle, '0' if not.
+ */
+static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
+{
+ return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_IDLE;
+}
+
+/**
+ * xilinx_vdma_halt - Halt VDMA channel
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
+{
+ int loop = XILINX_VDMA_LOOP_COUNT;
+
+ vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to halt */
+ do {
+ if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_HALTED)
+ break;
+ } while (--loop);
+
+ if (!loop) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ chan->err = true;
+ }
+
+ return;
+}
+
+/**
+ * xilinx_vdma_start - Start VDMA channel
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
+{
+ int loop = XILINX_VDMA_LOOP_COUNT;
+
+ vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to start */
+ do {
+ if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
+ XILINX_VDMA_DMASR_HALTED))
+ break;
+ } while (--loop);
+
+ if (!loop) {
+ dev_err(chan->dev, "Cannot start channel %p: %x\n",
+ chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+
+ chan->err = true;
+ }
+
+ return;
+}
+
+/**
+ * xilinx_vdma_start_transfer - Starts VDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_config *config = &chan->config;
+ struct xilinx_vdma_tx_descriptor *desc;
+ unsigned long flags;
+ u32 reg;
+ struct xilinx_vdma_tx_segment *head, *tail = NULL;
+
+ if (chan->err)
+ return;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* There's already an active descriptor, bail out. */
+ if (chan->active_desc)
+ goto out_unlock;
+
+ if (list_empty(&chan->pending_list))
+ goto out_unlock;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+
+ /* If it is SG mode and hardware is busy, cannot submit */
+ if (chan->has_sg && xilinx_vdma_is_running(chan) &&
+ !xilinx_vdma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ goto out_unlock;
+ }
+
+ /*
+ * If hardware is idle, then all descriptors on the running lists are
+ * done, start new transfers
+ */
+ if (chan->has_sg) {
+ head = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ tail = list_entry(desc->segments.prev,
+ struct xilinx_vdma_tx_segment, node);
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
+ }
+
+ /* Configure the hardware using info in the config structure */
+ reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+
+ if (config->frm_cnt_en)
+ reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
+ else
+ reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+
+ /*
+ * With SG, start with circular mode, so that BDs can be fetched.
+ * In direct register mode, if not parking, enable circular mode
+ */
+ if (chan->has_sg || !config->park)
+ reg |= XILINX_VDMA_DMACR_CIRC_EN;
+
+ if (config->park)
+ reg &= ~XILINX_VDMA_DMACR_CIRC_EN;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);
+
+ if (config->park && (config->park_frm >= 0) &&
+ (config->park_frm < chan->num_frms)) {
+ if (chan->direction == DMA_MEM_TO_DEV)
+ vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+ config->park_frm <<
+ XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
+ else
+ vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+ config->park_frm <<
+ XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
+ }
+
+ /* Start the hardware */
+ xilinx_vdma_start(chan);
+
+ if (chan->err)
+ goto out_unlock;
+
+ /* Start the transfer */
+ if (chan->has_sg) {
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+ } else {
+ struct xilinx_vdma_tx_segment *segment, *last = NULL;
+ int i = 0;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ vdma_desc_write(chan,
+ XILINX_VDMA_REG_START_ADDRESS(i++),
+ segment->hw.buf_addr);
+ last = segment;
+ }
+
+ if (!last)
+ goto out_unlock;
+
+ /* HW expects these parameters to be the same for one transaction */
+ vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
+ last->hw.stride);
+ vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
+ }
+
+ list_del(&desc->node);
+ chan->active_desc = desc;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_vdma_start_transfer(chan);
+}
+
+/**
+ * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: Driver specific VDMA channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
+{
+ struct xilinx_vdma_tx_descriptor *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->active_desc;
+ if (!desc) {
+ dev_dbg(chan->dev, "no running descriptors\n");
+ goto out_unlock;
+ }
+
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+
+ chan->active_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_vdma_reset - Reset VDMA channel
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
+{
+ int loop = XILINX_VDMA_LOOP_COUNT;
+ u32 tmp;
+
+ vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
+
+ /* Wait for the hardware to finish reset */
+ do {
+ tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
+ XILINX_VDMA_DMACR_RESET;
+ } while (loop-- && tmp);
+
+ /*
+ * Check the sampled reset bit rather than the counter: on expiry
+ * the post-decrement leaves loop at -1, so "!loop" would miss the
+ * timeout.
+ */
+ if (tmp) {
+ dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ return -ETIMEDOUT;
+ }
+
+ chan->err = false;
+
+ return 0;
+}
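
This timeout test is easy to get wrong, which is why the check above is made against the sampled reset bit: the post-decrement leaves the counter at -1 on expiry, so testing the counter can silently swallow a timeout. A standalone, runnable illustration of the safe shape (plain C, not kernel code; the register stand-in and values are made up):

        #include <stdio.h>

        /* Hypothetical stand-in for a hardware status read: stuck in reset. */
        static unsigned int read_reset_bit(void) { return 1; }

        int main(void)
        {
                int loop = 10;
                unsigned int busy;

                do {
                        busy = read_reset_bit();
                } while (loop-- && busy);

                /*
                 * Decide on the sampled value, not the counter: on expiry
                 * loop is -1 here, so "!loop" would be false and the
                 * timeout would go unreported.
                 */
                if (busy)
                        printf("reset timeout\n");
                else
                        printf("reset complete\n");
                return 0;
        }
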
+
+/**
+ * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
+ * @chan: Driver specific VDMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
+{
+ int err;
+
+ /* Reset VDMA */
+ err = xilinx_vdma_reset(chan);
+ if (err)
+ return err;
+
+ /* Enable interrupts */
+ vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
+ XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+
+ return 0;
+}
+
+/**
+ * xilinx_vdma_irq_handler - VDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx VDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_vdma_chan *chan = data;
+ u32 status;
+
+ /* Read the status and ack the interrupts. */
+ status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
+ if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
+ return IRQ_NONE;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
+ status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+
+ if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
+ /*
+ * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
+ * error is recoverable, ignore it. Otherwise flag the error.
+ *
+ * Only recoverable errors can be cleared in the DMASR register;
+ * take care not to write 1 to any other error bit.
+ */
+ u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
+ errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);
+
+ if (!chan->flush_on_fsync ||
+ (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
+ dev_err(chan->dev,
+ "Channel %p has errors %x, cdr %x tdr %x\n",
+ chan, errors,
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
+ vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
+ chan->err = true;
+ }
+ }
+
+ if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
+ /*
+ * The device is taking too long to complete the transfer while
+ * the user requires responsiveness (the delay counter expired).
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ xilinx_vdma_complete_descriptor(chan);
+ xilinx_vdma_start_transfer(chan);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
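
The write-one-to-clear handling in this handler is worth spelling out: only the interrupt bits that were actually asserted are acknowledged, and of the error bits only the recoverable subset is cleared. A small runnable illustration of the masking arithmetic, with made-up mask layouts and a made-up status snapshot (not the real register definitions):

        #include <stdio.h>

        /* Made-up bit layouts for illustration only. */
        #define ALL_IRQ_MASK      0x0000f000u
        #define ALL_ERR_MASK      0x00000ff0u
        #define ERR_RECOVER_MASK  0x00000030u

        int main(void)
        {
                unsigned int status = 0x0000a0f0; /* pretend DMASR snapshot */

                /* Ack only the asserted IRQ bits (write-one-to-clear). */
                unsigned int ack = status & ALL_IRQ_MASK;

                /* Clear only recoverable error bits; leave the rest latched. */
                unsigned int errors = status & ALL_ERR_MASK;
                unsigned int clear  = errors & ERR_RECOVER_MASK;

                printf("ack=%#x clear=%#x fatal=%#x\n",
                       ack, clear, errors & ~ERR_RECOVER_MASK);
                return 0;
        }
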
+
+/**
+ * xilinx_vdma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int err;
+
+ if (chan->err) {
+ /*
+ * The channel has errors; try to reset it. If the reset
+ * fails, the channel is no longer functional and the
+ * system needs a hard reset.
+ */
+ err = xilinx_vdma_chan_reset(chan);
+ if (err < 0)
+ return err;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Append the transaction to the pending transactions queue. */
+ list_add_tail(&desc->node, &chan->pending_list);
+
+ /* The descriptor is now owned by pending_list; drop the staging pointer */
+ chan->allocated_desc = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_vdma_tx_descriptor *desc;
+ struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_desc_hw *hw;
+
+ if (!is_slave_direction(xt->dir))
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_vdma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_vdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /* Fill in the hardware descriptor */
+ hw = &segment->hw;
+ hw->vsize = xt->numf;
+ hw->hsize = xt->sgl[0].size;
+ hw->stride = xt->sgl[0].icg <<
+ XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+ hw->stride |= chan->config.frm_dly <<
+ XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+
+ if (xt->dir != DMA_MEM_TO_DEV)
+ hw->buf_addr = xt->dst_start;
+ else
+ hw->buf_addr = xt->src_start;
+
+ /* Point the previous segment's next-descriptor link at the new segment */
+ prev = list_last_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ prev = segment;
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_vdma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
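
The prep routine above consumes only a few template fields: numf becomes hw->vsize, sgl[0].size becomes hw->hsize, and sgl[0].icg feeds the stride field alongside the configured frame delay. A sketch of filling the template for one captured frame; the geometry, helper name, and frame_size choice are mine, not the patch's:

        #include <linux/dmaengine.h>
        #include <linux/slab.h>

        /* Hypothetical geometry: 1280x720, 2 bytes/pixel, 4096-byte stride. */
        static struct dma_interleaved_template *example_build_template(dma_addr_t buf)
        {
                struct dma_interleaved_template *xt;

                xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
                if (!xt)
                        return NULL;

                xt->dir = DMA_DEV_TO_MEM;         /* S2MM: capture into memory */
                xt->dst_start = buf;              /* used since dir != DMA_MEM_TO_DEV */
                xt->numf = 720;                   /* lines per frame -> hw->vsize */
                xt->frame_size = 1;               /* one chunk describes every line */
                xt->sgl[0].size = 1280 * 2;       /* active bytes/line -> hw->hsize */
                xt->sgl[0].icg = 4096 - 1280 * 2; /* stride padding -> hw->stride */

                return xt;
        }
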
+
+/**
+ * xilinx_vdma_terminate_all - Halt the channel and free descriptors
+ * @chan: Driver specific VDMA Channel pointer
+ */
+static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan)
+{
+ /* Halt the DMA engine */
+ xilinx_vdma_halt(chan);
+
+ /* Remove and free all of the descriptors in the lists */
+ xilinx_vdma_free_descriptors(chan);
+}
+
+/**
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * Run-time configuration for AXI VDMA, supports:
+ * . halt the channel
+ * . configure interrupt coalescing and inter-packet delay threshold
+ * . start/stop parking
+ * . enable genlock
+ *
+ * @dchan: DMA channel
+ * @cfg: VDMA device configuration pointer
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+ struct xilinx_vdma_config *cfg)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ u32 dmacr;
+
+ if (cfg->reset)
+ return xilinx_vdma_chan_reset(chan);
+
+ dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+
+ chan->config.frm_dly = cfg->frm_dly;
+ chan->config.park = cfg->park;
+
+ /* genlock settings */
+ chan->config.gen_lock = cfg->gen_lock;
+ chan->config.master = cfg->master;
+
+ if (cfg->gen_lock && chan->genlock) {
+ dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
+ dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
+ }
+
+ chan->config.frm_cnt_en = cfg->frm_cnt_en;
+ if (cfg->park)
+ chan->config.park_frm = cfg->park_frm;
+ else
+ chan->config.park_frm = -1;
+
+ chan->config.coalesc = cfg->coalesc;
+ chan->config.delay = cfg->delay;
+
+ if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
+ chan->config.coalesc = cfg->coalesc;
+ }
+
+ if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
+ dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
+ chan->config.delay = cfg->delay;
+ }
+
+ /* FSync Source selection */
+ dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
+ dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;
+
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
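
Since the helper is exported for client drivers, here is a hedged usage sketch. The field names match the code above; the header path is my assumption based on the rest of this patch, and the parking setup is only an example:

        #include <linux/amba/xilinx_dma.h>  /* assumed header for the config struct */

        /* Hypothetical: park on frame 2 and interrupt after every frame. */
        static int example_configure(struct dma_chan *chan)
        {
                struct xilinx_vdma_config cfg = {
                        .frm_cnt_en = 1,  /* interrupt every 'coalesc' frames */
                        .coalesc    = 1,
                        .park       = 1,
                        .park_frm   = 2,  /* must be < num_frms, see above */
                };

                return xilinx_vdma_channel_set_config(chan, &cfg);
        }
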
+
+/**
+ * xilinx_vdma_device_control - Configure DMA channel of the device
+ * @dchan: DMA Channel pointer
+ * @cmd: DMA control command
+ * @arg: Channel configuration
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ xilinx_vdma_terminate_all(chan);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_vdma_chan_remove - Per Channel remove function
+ * @chan: Driver specific VDMA channel
+ */
+static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
+{
+ /* Disable all interrupts */
+ vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
+ XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+
+ if (chan->irq > 0)
+ free_irq(chan->irq, chan);
+
+ tasklet_kill(&chan->tasklet);
+
+ list_del(&chan->common.device_node);
+}
+
+/**
+ * xilinx_vdma_chan_probe - Per Channel Probing
+ * It gets channel features from the device tree entry and
+ * initializes the special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
+ struct device_node *node)
+{
+ struct xilinx_vdma_chan *chan;
+ bool has_dre = false;
+ u32 value, width;
+ int err;
+
+ /* Allocate and initialize the channel structure */
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->dev = xdev->dev;
+ chan->xdev = xdev;
+ chan->has_sg = xdev->has_sg;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ /* Retrieve the channel properties from the device tree */
+ has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+ chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+
+ err = of_property_read_u32(node, "xlnx,datawidth", &value);
+ if (err) {
+ dev_err(xdev->dev, "missing xlnx,datawidth property\n");
+ return err;
+ }
+ width = value >> 3; /* Convert bits to bytes */
+
+ /* If data width is greater than 8 bytes, DRE is not in hw */
+ if (width > 8)
+ has_dre = false;
+
+ if (!has_dre)
+ xdev->common.copy_align = fls(width - 1);
+
+ if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
+ chan->direction = DMA_MEM_TO_DEV;
+ chan->id = 0;
+
+ chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
+ chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+
+ if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
+ chan->flush_on_fsync = true;
+ } else if (of_device_is_compatible(node,
+ "xlnx,axi-vdma-s2mm-channel")) {
+ chan->direction = DMA_DEV_TO_MEM;
+ chan->id = 1;
+
+ chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
+ chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+
+ if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
+ chan->flush_on_fsync = true;
+ } else {
+ dev_err(xdev->dev, "Invalid channel compatible node\n");
+ return -EINVAL;
+ }
+
+ /* Request the interrupt */
+ chan->irq = irq_of_parse_and_map(node, 0);
+ err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
+ "xilinx-vdma-controller", chan);
+ if (err) {
+ dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+ return err;
+ }
+
+ /* Initialize the tasklet */
+ tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
+ (unsigned long)chan);
+
+ /*
+ * Initialize the DMA channel and add it to the DMA engine channels
+ * list.
+ */
+ chan->common.device = &xdev->common;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+ xdev->chan[chan->id] = chan;
+
+ /* Reset the channel */
+ err = xilinx_vdma_chan_reset(chan);
+ if (err < 0) {
+ dev_err(xdev->dev, "Reset channel failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
+ int chan_id = dma_spec->args[0];
+
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
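
Because the translation function hands out channels by index, a consumer normally looks one up by name through the standard OF helper; a sketch, assuming a consumer node whose dmas/dma-names properties point at this controller ("vdma0" is an assumed name):

        #include <linux/dmaengine.h>

        /* Hypothetical consumer probe fragment. */
        static struct dma_chan *example_get_channel(struct device *dev)
        {
                struct dma_chan *chan;

                chan = dma_request_slave_channel(dev, "vdma0");
                if (!chan)
                        dev_err(dev, "no VDMA channel, check dmas/dma-names\n");

                return chan;
        }
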
+
+/**
+ * xilinx_vdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_vdma_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_vdma_device *xdev;
+ struct device_node *child;
+ struct resource *io;
+ u32 num_frames;
+ int i, err;
+
+ /* Allocate and initialize the DMA engine structure */
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+
+ /* Request and map I/O memory */
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(xdev->regs))
+ return PTR_ERR(xdev->regs);
+
+ /* Retrieve the DMA engine properties from the device tree */
+ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+
+ err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
+ if (err < 0) {
+ dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
+ return err;
+ }
+
+ err = of_property_read_u32(node, "xlnx,flush-fsync",
+ &xdev->flush_on_fsync);
+ if (err < 0)
+ dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");
+
+ /* Initialize the DMA engine */
+ xdev->common.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+
+ xdev->common.device_alloc_chan_resources =
+ xilinx_vdma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xilinx_vdma_free_chan_resources;
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_vdma_dma_prep_interleaved;
+ xdev->common.device_control = xilinx_vdma_device_control;
+ xdev->common.device_tx_status = xilinx_vdma_tx_status;
+ xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
+
+ platform_set_drvdata(pdev, xdev);
+
+ /* Initialize the channels */
+ for_each_child_of_node(node, child) {
+ err = xilinx_vdma_chan_probe(xdev, child);
+ if (err < 0)
+ goto error;
+ }
+
+ for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xdev->chan[i]->num_frms = num_frames;
+
+ /* Register the DMA engine with the core */
+ dma_async_device_register(&xdev->common);
+
+ err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+ xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&xdev->common);
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+
+ return 0;
+
+error:
+ for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xilinx_vdma_chan_remove(xdev->chan[i]);
+
+ return err;
+}
+
+/**
+ * xilinx_vdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_vdma_remove(struct platform_device *pdev)
+{
+ struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
+ int i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+
+ dma_async_device_unregister(&xdev->common);
+
+ for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xilinx_vdma_chan_remove(xdev->chan[i]);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_vdma_of_ids[] = {
+ { .compatible = "xlnx,axi-vdma-1.00.a",},
+ {}
+};
+
+static struct platform_driver xilinx_vdma_driver = {
+ .driver = {
+ .name = "xilinx-vdma",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_vdma_of_ids,
+ },
+ .probe = xilinx_vdma_probe,
+ .remove = xilinx_vdma_remove,
+};
+
+module_platform_driver(xilinx_vdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx VDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 4b9dc836dcf9..e992abc5ef26 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -40,7 +40,7 @@ struct pstore_read_data {
static inline u64 generic_id(unsigned long timestamp,
unsigned int part, int count)
{
- return (timestamp * 100 + part) * 1000 + count;
+ return ((u64) timestamp * 100 + part) * 1000 + count;
}
static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
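
The one-line fix above matters because timestamp is an unsigned long, which is 32 bits wide on 32-bit targets, so (timestamp * 100 + part) * 1000 wraps long before it is widened to the u64 return type. A runnable userspace demonstration, forcing 32-bit arithmetic with uint32_t (the sample values are made up):

        #include <stdio.h>
        #include <stdint.h>
        #include <inttypes.h>

        int main(void)
        {
                uint32_t timestamp = 1405540658; /* seconds since epoch, July 2014 */
                uint32_t part = 1;
                int count = 2;

                /* Old: the arithmetic wraps at 32 bits before widening. */
                uint64_t broken = (uint64_t)((timestamp * 100 + part) * 1000) + count;
                /* New: widen first, as the patch does. */
                uint64_t fixed = ((uint64_t)timestamp * 100 + part) * 1000 + count;

                printf("broken=%" PRIu64 " fixed=%" PRIu64 "\n", broken, fixed);
                return 0;
        }
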
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index cd36deb619fa..eff1a2f22f09 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -353,10 +353,10 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
int depth, void *data)
{
struct param_info *info = data;
- void *prop, *dest;
- unsigned long len;
+ const void *prop;
+ void *dest;
u64 val;
- int i;
+ int i, len;
if (depth != 1 ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 5c6a8e8a9580..82d774161cc9 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -63,7 +63,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
*/
prev = 0;
for (;;) {
- const char *type, *name;
+ const char *type;
int len;
node = fdt_next_node(fdt, prev, NULL);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d9c9cb4665db..2ebc9071e354 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2614,7 +2614,7 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
- if (!IS_ERR(desc))
+ if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
break;
}
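
The change makes of_find_gpio() stop iterating not only on success but also on -EPROBE_DEFER, so a deferral from the GPIO provider reaches the consumer instead of being masked by a later lookup failure. The consumer-side pattern this serves, as a sketch (API as of this kernel; the driver shape and "reset" connection name are illustrative):

        #include <linux/err.h>
        #include <linux/gpio/consumer.h>
        #include <linux/platform_device.h>

        /* Hypothetical consumer probe: pass the deferral up, do not mask it. */
        static int example_probe(struct platform_device *pdev)
        {
                struct gpio_desc *reset;

                reset = devm_gpiod_get(&pdev->dev, "reset");
                if (IS_ERR(reset))
                        return PTR_ERR(reset); /* may be -EPROBE_DEFER: return it */

                gpiod_set_value(reset, 0);     /* device can now be brought up */
                return 0;
        }
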
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index d8a22c2a579d..70da9eb52a42 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1,2 +1,3 @@
obj-y += drm/ vga/
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
+obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d1cc2f613a78..f5120046ff80 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -83,6 +83,8 @@ config DRM_KMS_CMA_HELPER
source "drivers/gpu/drm/i2c/Kconfig"
+source "drivers/gpu/drm/bridge/Kconfig"
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -199,5 +201,3 @@ source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
-
-source "drivers/gpu/drm/bridge/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 48e38ba22783..dd2ba4269740 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
- drm_plane_helper.o
+ drm_modeset_lock.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +23,8 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+ drm_plane_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 32982da82694..8ab3cd1a8cdb 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -173,7 +173,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_kms;
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
if (ret)
goto err_kms;
@@ -402,7 +402,7 @@ static struct platform_driver armada_drm_platform_driver = {
static int __init armada_drm_init(void)
{
- armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+ armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
return platform_driver_register(&armada_drm_platform_driver);
}
module_init(armada_drm_init);
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 948cb14c561e..fd166f532ab9 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -181,10 +181,8 @@ void armada_fbdev_lastclose(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
- drm_modeset_lock_all(dev);
if (priv->fbdev)
- drm_fb_helper_restore_fbdev_mode(priv->fbdev);
- drm_modeset_unlock_all(dev);
+ drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
void armada_fbdev_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 887816f43476..bb9b642d8485 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -433,7 +433,6 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct address_space *mapping;
- gfp_t gfp;
int count;
count = dobj->obj.size / PAGE_SIZE;
@@ -441,12 +440,11 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
goto free_sgt;
mapping = file_inode(dobj->obj.filp)->i_mapping;
- gfp = mapping_gfp_mask(mapping);
for_each_sg(sgt->sgl, sg, count, i) {
struct page *page;
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
num = i;
goto release;
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 8df4f284ee24..171aa0622b66 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
-ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
-obj-$(CONFIG_DRM_AST) := ast.o \ No newline at end of file
+obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
new file mode 100644
index 000000000000..5da4b62285fa
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -0,0 +1,410 @@
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "ast_drv.h"
+MODULE_FIRMWARE("ast_dp501_fw.bin");
+
+int ast_load_dp501_microcode(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ static char *fw_name = "ast_dp501_fw.bin";
+ int err;
+ err = request_firmware(&ast->dp501_fw, fw_name, dev->dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void send_ack(struct ast_private *ast)
+{
+ u8 sendack;
+ sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
+ sendack |= 0x80;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
+}
+
+static void send_nack(struct ast_private *ast)
+{
+ u8 sendack;
+ sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
+ sendack &= ~0x80;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
+}
+
+static bool wait_ack(struct ast_private *ast)
+{
+ u8 waitack;
+ u32 retry = 0;
+ do {
+ waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
+ waitack &= 0x80;
+ udelay(100);
+ } while ((!waitack) && (retry++ < 1000));
+
+ return retry < 1000;
+}
+
+static bool wait_nack(struct ast_private *ast)
+{
+ u8 waitack;
+ u32 retry = 0;
+ do {
+ waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
+ waitack &= 0x80;
+ udelay(100);
+ } while ((waitack) && (retry++ < 1000));
+
+ return retry < 1000;
+}
+
+static void set_cmd_trigger(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
+}
+
+static void clear_cmd_trigger(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
+}
+
+#if 0
+static bool wait_fw_ready(struct ast_private *ast)
+{
+ u8 waitready;
+ u32 retry = 0;
+ do {
+ waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
+ waitready &= 0x40;
+ udelay(100);
+ } while ((!waitready) && (retry++ < 1000));
+
+ return retry < 1000;
+}
+#endif
+
+static bool ast_write_cmd(struct drm_device *dev, u8 data)
+{
+ struct ast_private *ast = dev->dev_private;
+ int retry = 0;
+ if (wait_nack(ast)) {
+ send_nack(ast);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
+ send_ack(ast);
+ set_cmd_trigger(ast);
+ do {
+ if (wait_ack(ast)) {
+ clear_cmd_trigger(ast);
+ send_nack(ast);
+ return true;
+ }
+ } while (retry++ < 100);
+ }
+ clear_cmd_trigger(ast);
+ send_nack(ast);
+ return false;
+}
+
+static bool ast_write_data(struct drm_device *dev, u8 data)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (wait_nack(ast)) {
+ send_nack(ast);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
+ send_ack(ast);
+ if (wait_ack(ast)) {
+ send_nack(ast);
+ return true;
+ }
+ }
+ send_nack(ast);
+ return false;
+}
+
+#if 0
+static bool ast_read_data(struct drm_device *dev, u8 *data)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 tmp;
+
+ *data = 0;
+
+ if (wait_ack(ast) == false)
+ return false;
+ tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff);
+ *data = tmp;
+ if (wait_nack(ast) == false) {
+ send_nack(ast);
+ return false;
+ }
+ send_nack(ast);
+ return true;
+}
+
+static void clear_cmd(struct ast_private *ast)
+{
+ send_nack(ast);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
+}
+#endif
+
+void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
+{
+ ast_write_cmd(dev, 0x40);
+ ast_write_data(dev, mode);
+
+ msleep(10);
+}
+
+static u32 get_fw_base(struct ast_private *ast)
+{
+ return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
+}
+
+bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 i, data;
+ u32 boot_address;
+
+ data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
+ if (data) {
+ boot_address = get_fw_base(ast);
+ for (i = 0; i < size; i += 4)
+ *(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i);
+ return true;
+ }
+ return false;
+}
+
+bool ast_launch_m68k(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 i, data, len = 0;
+ u32 boot_address;
+ u8 *fw_addr = NULL;
+ u8 jreg;
+
+ data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
+ if (!data) {
+
+ if (ast->dp501_fw_addr) {
+ fw_addr = ast->dp501_fw_addr;
+ len = 32*1024;
+ } else if (ast->dp501_fw) {
+ fw_addr = (u8 *)ast->dp501_fw->data;
+ len = ast->dp501_fw->size;
+ }
+ /* Get BootAddress */
+ ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
+ data = ast_mindwm(ast, 0x1e6e0004);
+ switch (data & 0x03) {
+ case 0:
+ boot_address = 0x44000000;
+ break;
+ default:
+ case 1:
+ boot_address = 0x48000000;
+ break;
+ case 2:
+ boot_address = 0x50000000;
+ break;
+ case 3:
+ boot_address = 0x60000000;
+ break;
+ }
+ boot_address -= 0x200000; /* -2MB */
+
+ /* copy image to buffer */
+ for (i = 0; i < len; i += 4) {
+ data = *(u32 *)(fw_addr + i);
+ ast_moutdwm(ast, boot_address + i, data);
+ }
+
+ /* Init SCU */
+ ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
+
+ /* Launch FW */
+ ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address);
+ ast_moutdwm(ast, 0x1e6e2100, 1);
+
+ /* Update Scratch */
+ data = ast_mindwm(ast, 0x1e6e2040) & 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */
+ data |= 0x800;
+ ast_moutdwm(ast, 0x1e6e2040, data);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
+ jreg |= 0x02;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
+ }
+ return true;
+}
+
+u8 ast_get_dp501_max_clk(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 boot_address, offset, data;
+ u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
+
+ boot_address = get_fw_base(ast);
+
+ /* validate FW version */
+ offset = 0xf000;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & 0xf0) != 0x10) /* version: 1x */
+ return maxclk;
+
+ /* Read Link Capability */
+ offset = 0xf014;
+ *(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
+ if (linkcap[2] == 0) {
+ linkrate = linkcap[0];
+ linklanes = linkcap[1];
+ data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+ if (data > 0xff)
+ data = 0xff;
+ maxclk = (u8)data;
+ }
+ return maxclk;
+}
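
The arithmetic above packs the link capability into a single byte: link-rate code 0x0a is the standard DPCD value for 2.7 Gbps (anything else is treated as the 1.62 Gbps rate), multiplied per lane and clamped to 0xff. A runnable restatement; the unit interpretation is my reading, not something the patch states:

        #include <stdio.h>

        /* Restates the driver's computation; units assumed, not documented. */
        static unsigned int dp501_max_clk(unsigned char linkrate,
                                          unsigned char linklanes)
        {
                unsigned int clk = (linkrate == 0x0a) ? 90u * linklanes
                                                      : 54u * linklanes;
                return clk > 0xff ? 0xff : clk; /* register field is one byte */
        }

        int main(void)
        {
                printf("2.7G  x2 lanes -> %u\n", dp501_max_clk(0x0a, 2)); /* 180 */
                printf("1.62G x4 lanes -> %u\n", dp501_max_clk(0x06, 4)); /* 216 */
                return 0;
        }
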
+
+bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+{
+ struct ast_private *ast = dev->dev_private;
+ u32 i, boot_address, offset, data;
+
+ boot_address = get_fw_base(ast);
+
+ /* validate FW version */
+ offset = 0xf000;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & 0xf0) != 0x10)
+ return false;
+
+ /* validate PnP Monitor */
+ offset = 0xf010;
+ data = ast_mindwm(ast, boot_address + offset);
+ if (!(data & 0x01))
+ return false;
+
+ /* Read EDID */
+ offset = 0xf020;
+ for (i = 0; i < 128; i += 4) {
+ data = ast_mindwm(ast, boot_address + offset + i);
+ *(u32 *)(ediddata + i) = data;
+ }
+
+ return true;
+}
+
+static bool ast_init_dvo(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+ u32 data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if (!(jreg & 0x80)) {
+ /* Init SCU DVO Settings */
+ data = ast_read32(ast, 0x12008);
+ /* delay phase */
+ data &= 0xfffff8ff;
+ data |= 0x00000500;
+ ast_write32(ast, 0x12008, data);
+
+ if (ast->chip == AST2300) {
+ data = ast_read32(ast, 0x12084);
+ /* multi-pins for DVO single-edge */
+ data |= 0xfffe0000;
+ ast_write32(ast, 0x12084, data);
+
+ data = ast_read32(ast, 0x12088);
+ /* multi-pins for DVO single-edge */
+ data |= 0x000fffff;
+ ast_write32(ast, 0x12088, data);
+
+ data = ast_read32(ast, 0x12090);
+ /* multi-pins for DVO single-edge */
+ data &= 0xffffffcf;
+ data |= 0x00000020;
+ ast_write32(ast, 0x12090, data);
+ } else { /* AST2400 */
+ data = ast_read32(ast, 0x12088);
+ /* multi-pins for DVO single-edge */
+ data |= 0x30000000;
+ ast_write32(ast, 0x12088, data);
+
+ data = ast_read32(ast, 0x1208c);
+ /* multi-pins for DVO single-edge */
+ data |= 0x000000cf;
+ ast_write32(ast, 0x1208c, data);
+
+ data = ast_read32(ast, 0x120a4);
+ /* multi-pins for DVO single-edge */
+ data |= 0xffff0000;
+ ast_write32(ast, 0x120a4, data);
+
+ data = ast_read32(ast, 0x120a8);
+ /* multi-pins for DVO single-edge */
+ data |= 0x0000000f;
+ ast_write32(ast, 0x120a8, data);
+
+ data = ast_read32(ast, 0x12094);
+ /* multi-pins for DVO single-edge */
+ data |= 0x00000002;
+ ast_write32(ast, 0x12094, data);
+ }
+ }
+
+ /* Force to DVO */
+ data = ast_read32(ast, 0x1202c);
+ data &= 0xfffbffff;
+ ast_write32(ast, 0x1202c, data);
+
+ /* Init VGA DVO Settings */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
+ return true;
+}
+
+void ast_init_3rdtx(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+ u32 data;
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ switch (jreg & 0x0e) {
+ case 0x04:
+ ast_init_dvo(dev);
+ break;
+ case 0x08:
+ ast_launch_m68k(dev);
+ break;
+ case 0x0c:
+ ast_init_dvo(dev);
+ break;
+ default:
+ if (ast->tx_chip_type == AST_TX_SIL164)
+ ast_init_dvo(dev);
+ else {
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ data = ast_read32(ast, 0x1202c);
+ data &= 0xfffcffff;
+ ast_write32(ast, 0, data);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 5137f15dba19..44074fbcf7ff 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -94,9 +94,7 @@ static int ast_drm_thaw(struct drm_device *dev)
ast_post_gpu(dev);
drm_mode_config_reset(dev);
- drm_modeset_lock_all(dev);
drm_helper_resume_force_mode(dev);
- drm_modeset_unlock_all(dev);
console_lock();
ast_fbdev_set_suspend(dev, 0);
@@ -198,7 +196,6 @@ static const struct file_operations ast_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
- .dev_priv_size = 0,
.load = ast_driver_load,
.unload = ast_driver_unload,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 9833a1b1acc1..5d6a87573c33 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -61,9 +61,17 @@ enum ast_chip {
AST2200,
AST2150,
AST2300,
+ AST2400,
AST1180,
};
+enum ast_tx_chip {
+ AST_TX_NONE,
+ AST_TX_SIL164,
+ AST_TX_ITE66121,
+ AST_TX_DP501,
+};
+
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@@ -102,6 +110,12 @@ struct ast_private {
* we have. */
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
+ bool support_wide_screen;
+
+ enum ast_tx_chip tx_chip_type;
+ u8 dp501_maxclk;
+ u8 *dp501_fw_addr;
+ const struct firmware *dp501_fw; /* dp501 fw */
};
int ast_driver_load(struct drm_device *dev, unsigned long flags);
@@ -368,4 +382,14 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma);
/* ast post */
void ast_post_gpu(struct drm_device *dev);
+u32 ast_mindwm(struct ast_private *ast, u32 r);
+void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
+/* ast dp501 */
+int ast_load_dp501_microcode(struct drm_device *dev);
+void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
+bool ast_launch_m68k(struct drm_device *dev);
+bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
+bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
+u8 ast_get_dp501_max_clk(struct drm_device *dev);
+void ast_init_3rdtx(struct drm_device *dev);
#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 50535fd5a88d..a2cc6be97983 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -66,12 +66,16 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static int ast_detect_chip(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ uint32_t data, jreg;
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
} else {
- if (dev->pdev->revision >= 0x20) {
+ if (dev->pdev->revision >= 0x30) {
+ ast->chip = AST2400;
+ DRM_INFO("AST 2400 detected\n");
+ } else if (dev->pdev->revision >= 0x20) {
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
@@ -104,6 +108,59 @@ static int ast_detect_chip(struct drm_device *dev)
DRM_INFO("AST 2000 detected\n");
}
}
+
+ switch (ast->chip) {
+ case AST1180:
+ ast->support_wide_screen = true;
+ break;
+ case AST2000:
+ ast->support_wide_screen = false;
+ break;
+ default:
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if (!(jreg & 0x80))
+ ast->support_wide_screen = true;
+ else if (jreg & 0x01)
+ ast->support_wide_screen = true;
+ else {
+ ast->support_wide_screen = false;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x1207c);
+ data &= 0x300;
+ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
+ }
+ break;
+ }
+
+ ast->tx_chip_type = AST_TX_NONE;
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
+ if (jreg & 0x80)
+ ast->tx_chip_type = AST_TX_SIL164;
+ if ((ast->chip == AST2300) || (ast->chip == AST2400)) {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ switch (jreg) {
+ case 0x04:
+ ast->tx_chip_type = AST_TX_SIL164;
+ break;
+ case 0x08:
+ ast->dp501_fw_addr = kzalloc(32*1024, GFP_KERNEL);
+ if (ast->dp501_fw_addr) {
+ /* backup firmware */
+ if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
+ kfree(ast->dp501_fw_addr);
+ ast->dp501_fw_addr = NULL;
+ }
+ }
+ /* fallthrough */
+ case 0x0c:
+ ast->tx_chip_type = AST_TX_DP501;
+ }
+ }
+
return 0;
}
@@ -129,7 +186,7 @@ static int ast_get_dram_info(struct drm_device *dev)
else
ast->dram_bus_width = 32;
- if (ast->chip == AST2300) {
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
@@ -257,17 +314,32 @@ static u32 ast_get_vram_info(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u8 jreg;
-
+ u32 vram_size;
ast_open_key(ast);
+ vram_size = AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
switch (jreg & 3) {
- case 0: return AST_VIDMEM_SIZE_8M;
- case 1: return AST_VIDMEM_SIZE_16M;
- case 2: return AST_VIDMEM_SIZE_32M;
- case 3: return AST_VIDMEM_SIZE_64M;
+ case 0: vram_size = AST_VIDMEM_SIZE_8M; break;
+ case 1: vram_size = AST_VIDMEM_SIZE_16M; break;
+ case 2: vram_size = AST_VIDMEM_SIZE_32M; break;
+ case 3: vram_size = AST_VIDMEM_SIZE_64M; break;
+ }
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
+ switch (jreg & 0x03) {
+ case 1:
+ vram_size -= 0x100000;
+ break;
+ case 2:
+ vram_size -= 0x200000;
+ break;
+ case 3:
+ vram_size -= 0x400000;
+ break;
}
- return AST_VIDMEM_DEFAULT_SIZE;
+
+ return vram_size;
}
int ast_driver_load(struct drm_device *dev, unsigned long flags)
@@ -316,6 +388,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
ast->chip == AST2300 ||
+ ast->chip == AST2400 ||
ast->chip == AST1180) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
@@ -343,6 +416,7 @@ int ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
ast_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
@@ -411,16 +485,13 @@ static void ast_bo_unref(struct ast_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
-
+ *bo = NULL;
}
+
void ast_gem_free_object(struct drm_gem_object *obj)
{
struct ast_bo *ast_bo = gem_to_ast_bo(obj);
- if (!ast_bo)
- return;
ast_bo_unref(&ast_bo);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a4afdc8bb578..114aee941d46 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -115,11 +115,17 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
break;
+ case 1360:
+ vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
+ break;
case 1440:
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
- vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+ if (crtc->mode.crtc_vdisplay == 900)
+ vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
break;
case 1680:
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
@@ -175,14 +181,17 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+ if (vbios_mode->enh_table->flags & NewModeInfo) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
}
return true;
@@ -389,7 +398,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
/* Set Threshold */
- if (ast->chip == AST2300) {
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
} else if (ast->chip == AST2100 ||
@@ -451,9 +460,13 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ if (ast->tx_chip_type == AST_TX_DP501)
+ ast_set_dp501_video_output(crtc->dev, 1);
ast_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_OFF:
+ if (ast->tx_chip_type == AST_TX_DP501)
+ ast_set_dp501_video_output(crtc->dev, 0);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
break;
}
@@ -729,10 +742,24 @@ static int ast_encoder_init(struct drm_device *dev)
static int ast_get_modes(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct ast_private *ast = connector->dev->dev_private;
struct edid *edid;
int ret;
-
- edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+ bool flags = false;
+ if (ast->tx_chip_type == AST_TX_DP501) {
+ ast->dp501_maxclk = 0xff;
+ edid = kmalloc(128, GFP_KERNEL);
+ if (!edid)
+ return -ENOMEM;
+
+ flags = ast_dp501_read_edid(connector->dev, (u8 *)edid);
+ if (flags)
+ ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev);
+ else
+ kfree(edid);
+ }
+ if (!flags)
+ edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
drm_mode_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -746,7 +773,56 @@ static int ast_get_modes(struct drm_connector *connector)
static int ast_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- return MODE_OK;
+ struct ast_private *ast = connector->dev->dev_private;
+ int flags = MODE_NOMODE;
+ uint32_t jtemp;
+
+ if (ast->support_wide_screen) {
+ if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
+ return MODE_OK;
+ if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
+ return MODE_OK;
+ if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
+ return MODE_OK;
+ if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
+ return MODE_OK;
+ if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
+ return MODE_OK;
+
+ if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST1180)) {
+ if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
+ return MODE_OK;
+
+ if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (jtemp & 0x01)
+ return MODE_NOMODE;
+ else
+ return MODE_OK;
+ }
+ }
+ }
+ switch (mode->hdisplay) {
+ case 640:
+ if (mode->vdisplay == 480) flags = MODE_OK;
+ break;
+ case 800:
+ if (mode->vdisplay == 600) flags = MODE_OK;
+ break;
+ case 1024:
+ if (mode->vdisplay == 768) flags = MODE_OK;
+ break;
+ case 1280:
+ if (mode->vdisplay == 1024) flags = MODE_OK;
+ break;
+ case 1600:
+ if (mode->vdisplay == 1200) flags = MODE_OK;
+ break;
+ default:
+ return flags;
+ }
+
+ return flags;
}
static void ast_connector_destroy(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 635f6ffc27c2..38d437f3a267 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -78,7 +78,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
for (i = 0x81; i <= 0x8f; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
- if (ast->chip == AST2300) {
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
if (dev->pdev->revision >= 0x20)
ext_reg_info = extreginfo_ast2300;
else
@@ -102,23 +102,32 @@ ast_set_def_ext_reg(struct drm_device *dev)
/* Enable RAMDAC for A1 */
reg = 0x04;
- if (ast->chip == AST2300)
+ if (ast->chip == AST2300 || ast->chip == AST2400)
reg |= 0x20;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
-static inline u32 mindwm(struct ast_private *ast, u32 r)
+u32 ast_mindwm(struct ast_private *ast, u32 r)
{
+ uint32_t data;
+
ast_write32(ast, 0xf004, r & 0xffff0000);
ast_write32(ast, 0xf000, 0x1);
+ do {
+ data = ast_read32(ast, 0xf004) & 0xffff0000;
+ } while (data != (r & 0xffff0000));
return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
-static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
{
+ uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
ast_write32(ast, 0xf000, 0x1);
+ do {
+ data = ast_read32(ast, 0xf004) & 0xffff0000;
+ } while (data != (r & 0xffff0000));
ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
}
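
The loops added here make the indirect access window explicit: the upper 16 bits of the SoC address select a 64 KiB window aliased at BAR offset 0x10000, and the new poll waits until the window register reads back the requested base before the aliased register is touched. A generic restatement of the pattern with hypothetical accessors standing in for ast_read32/ast_write32:

        #include <stdint.h>

        /* Hypothetical MMIO accessors; declared extern so this compiles alone. */
        extern uint32_t mmio_read32(uint32_t off);
        extern void mmio_write32(uint32_t off, uint32_t val);

        /* Read a SoC register through the 64 KiB indirect window. */
        static uint32_t window_read(uint32_t soc_addr)
        {
                uint32_t base = soc_addr & 0xffff0000u;

                mmio_write32(0xf004, base); /* select window base */
                mmio_write32(0xf000, 0x1);  /* enable the window */

                /* Wait until the selection has actually latched. */
                while ((mmio_read32(0xf004) & 0xffff0000u) != base)
                        ;

                return mmio_read32(0x10000 + (soc_addr & 0x0000ffffu));
        }
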
@@ -154,28 +163,28 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
{
u32 data, timeout;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x40;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
if (++timeout > TIMEOUT_AST2150) {
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0xffffffff;
}
} while (!data);
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x40;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
if (++timeout > TIMEOUT_AST2150) {
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0xffffffff;
}
} while (!data);
- data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return data;
}
@@ -184,18 +193,18 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
{
u32 data, timeout;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x40;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x40;
if (++timeout > TIMEOUT_AST2150) {
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0xffffffff;
}
} while (!data);
- data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return data;
}
#endif
@@ -215,7 +224,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw)
u32 patcnt, loop;
for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
- moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
if (cbrtest_ast2150(ast))
break;
@@ -237,7 +246,7 @@ cbr_start:
passcnt = 0;
for (dlli = 0; dlli < 100; dlli++) {
- moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
data = cbrscan_ast2150(ast, busw);
if (data != 0) {
if (data & 0x1) {
@@ -254,7 +263,7 @@ cbr_start:
goto cbr_start;
dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
- moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
}
@@ -365,10 +374,12 @@ void ast_post_gpu(struct drm_device *dev)
ast_open_key(ast);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300)
+ if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
+
+ ast_init_3rdtx(dev);
}
/* AST 2300 DRAM settings */
@@ -403,6 +414,7 @@ struct ast2300_dram_param {
/*
* DQSI DLL CBR Setting
*/
+#define CBR_SIZE0 ((1 << 10) - 1)
#define CBR_SIZE1 ((4 << 10) - 1)
#define CBR_SIZE2 ((64 << 10) - 1)
#define CBR_PASSNUM 5
@@ -423,88 +435,84 @@ static const u32 pattern[8] = {
0x7C61D253
};
-#if 0 /* unused in DDX, included for completeness */
static int mmc_test_burst(struct ast_private *ast, u32 datagen)
{
u32 data, timeout;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
if (data & 0x2000) {
return 0;
}
if (++timeout > TIMEOUT) {
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 0;
}
} while (!data);
- moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
return 1;
}
-#endif
static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
{
u32 data, timeout;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
if (++timeout > TIMEOUT) {
- moutdwm(ast, 0x1e6e0070, 0x0);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
return -1;
}
} while (!data);
- data = mindwm(ast, 0x1e6e0078);
+ data = ast_mindwm(ast, 0x1e6e0078);
data = (data | (data >> 16)) & 0xffff;
- moutdwm(ast, 0x1e6e0070, 0x0);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
return data;
}
-#if 0 /* Unused in DDX here for completeness */
static int mmc_test_single(struct ast_private *ast, u32 datagen)
{
u32 data, timeout;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x3000;
if (data & 0x2000)
return 0;
if (++timeout > TIMEOUT) {
- moutdwm(ast, 0x1e6e0070, 0x0);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
return 0;
}
} while (!data);
- moutdwm(ast, 0x1e6e0070, 0x0);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
return 1;
}
-#endif
static int mmc_test_single2(struct ast_private *ast, u32 datagen)
{
u32 data, timeout;
- moutdwm(ast, 0x1e6e0070, 0x00000000);
- moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
+ ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
timeout = 0;
do {
- data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
if (++timeout > TIMEOUT) {
- moutdwm(ast, 0x1e6e0070, 0x0);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
return -1;
}
} while (!data);
- data = mindwm(ast, 0x1e6e0078);
+ data = ast_mindwm(ast, 0x1e6e0078);
data = (data | (data >> 16)) & 0xffff;
- moutdwm(ast, 0x1e6e0070, 0x0);
+ ast_moutdwm(ast, 0x1e6e0070, 0x0);
return data;
}
@@ -533,7 +541,7 @@ static int cbr_scan(struct ast_private *ast)
data2 = 3;
for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
for (loop = 0; loop < CBR_PASSNUM2; loop++) {
if ((data = cbr_test(ast)) != 0) {
data2 &= data;
@@ -568,7 +576,7 @@ static u32 cbr_scan2(struct ast_private *ast)
data2 = 0xffff;
for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
- moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
for (loop = 0; loop < CBR_PASSNUM2; loop++) {
if ((data = cbr_test2(ast)) != 0) {
data2 &= data;
@@ -583,106 +591,35 @@ static u32 cbr_scan2(struct ast_private *ast)
return data2;
}
-#if 0 /* unused in DDX - added for completeness */
-static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+static u32 cbr_test3(struct ast_private *ast)
{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
-
- gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
- gold_sadj[1] = gold_sadj[0] >> 8;
- gold_sadj[0] = gold_sadj[0] & 0xff;
- gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
- gold_sadj[1] = gold_sadj[0];
-
- for (cnt = 0; cnt < 16; cnt++) {
- dllmin[cnt] = 0xff;
- dllmax[cnt] = 0x0;
- }
- passcnt = 0;
- for (dlli = 0; dlli < 76; dlli++) {
- moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- /* Wait DQSI latch phase calibration */
- moutdwm(ast, 0x1E6E0074, 0x00000010);
- moutdwm(ast, 0x1E6E0070, 0x00000003);
- do {
- data = mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
+ if (!mmc_test_burst(ast, 0))
+ return 0;
+ if (!mmc_test_single(ast, 0))
+ return 0;
+ return 1;
+}
- moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
- data = cbr_scan2(ast);
- if (data != 0) {
- mask = 0x00010001;
- for (cnt = 0; cnt < 16; cnt++) {
- if (data & mask) {
- if (dllmin[cnt] > dlli) {
- dllmin[cnt] = dlli;
- }
- if (dllmax[cnt] < dlli) {
- dllmax[cnt] = dlli;
- }
- }
- mask <<= 1;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD) {
- break;
- }
- }
- data = 0;
- for (cnt = 0; cnt < 8; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
- dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
- if (gold_sadj[0] >= dlli) {
- dlli = (gold_sadj[0] - dlli) >> 1;
- if (dlli > 3) {
- dlli = 3;
- }
- } else {
- dlli = (dlli - gold_sadj[0]) >> 1;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
- }
- }
- moutdwm(ast, 0x1E6E0080, data);
+static u32 cbr_scan3(struct ast_private *ast)
+{
+ u32 patcnt, loop;
- data = 0;
- for (cnt = 8; cnt < 16; cnt++) {
- data >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
- dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
- if (gold_sadj[1] >= dlli) {
- dlli = (gold_sadj[1] - dlli) >> 1;
- if (dlli > 3) {
- dlli = 3;
- } else {
- dlli = (dlli - 1) & 0x7;
- }
- } else {
- dlli = (dlli - gold_sadj[1]) >> 1;
- dlli += 1;
- if (dlli > 4) {
- dlli = 4;
- }
- dlli = (8 - dlli) & 0x7;
- }
- data |= dlli << 21;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < 2; loop++) {
+ if (cbr_test3(ast))
+ break;
}
+ if (loop == 2)
+ return 0;
}
- moutdwm(ast, 0x1E6E0084, data);
-
-} /* finetuneDQI */
-#endif
+ return 1;
+}
-static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
-
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
+ bool status = false;
FINETUNE_START:
for (cnt = 0; cnt < 16; cnt++) {
dllmin[cnt] = 0xff;
@@ -690,16 +627,8 @@ FINETUNE_START:
}
passcnt = 0;
for (dlli = 0; dlli < 76; dlli++) {
- moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- /* Wait DQSI latch phase calibration */
- moutdwm(ast, 0x1E6E0074, 0x00000010);
- moutdwm(ast, 0x1E6E0070, 0x00000003);
- do {
- data = mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
-
- moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
data = cbr_scan2(ast);
if (data != 0) {
mask = 0x00010001;
@@ -727,9 +656,13 @@ FINETUNE_START:
passcnt++;
}
}
+ if (retry++ > 10)
+ goto FINETUNE_DONE;
if (passcnt != 16) {
goto FINETUNE_START;
}
+ status = true;
+FINETUNE_DONE:
gold_sadj[0] = gold_sadj[0] >> 4;
gold_sadj[1] = gold_sadj[0];
@@ -753,7 +686,7 @@ FINETUNE_START:
data |= dlli << 21;
}
}
- moutdwm(ast, 0x1E6E0080, data);
+ ast_moutdwm(ast, 0x1E6E0080, data);
data = 0;
for (cnt = 8; cnt < 16; cnt++) {
@@ -778,162 +711,116 @@ FINETUNE_START:
data |= dlli << 21;
}
}
- moutdwm(ast, 0x1E6E0084, data);
-
+ ast_moutdwm(ast, 0x1E6E0084, data);
+ return status;
} /* finetuneDQI_L */
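
finetuneDQI_L() now reports success instead of looping forever: after ten retries it jumps to FINETUNE_DONE, still programs the best values it has, and returns false so the caller can restart initialization. A hedged sketch of that bounded goto-retry-with-status idiom (one_calibration_pass() and program_best_known_values() are hypothetical stand-ins):

	static bool calibrate_bounded(void)
	{
		unsigned int retry = 0;
		bool status = false;

	restart:
		if (!one_calibration_pass()) {
			if (retry++ > 10)
				goto done;	/* keep best effort, report failure */
			goto restart;
		}
		status = true;
	done:
		program_best_known_values();
		return status;
	}
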
-static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+static void finetuneDQSI(struct ast_private *ast)
{
- u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+ u32 dlli, dqsip, dqidly;
+ u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
+ u32 g_dqidly, g_dqsip, g_margin, g_side;
+ u16 pass[32][2][2];
+ char tag[2][76];
+
+ /* Disable DQI CBR */
+ reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
+ reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
+ reg_mcr18 &= 0x0000ffff;
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
- for (cnt = 0; cnt < 16; cnt++) {
- dllmin[cnt] = 0xff;
- dllmax[cnt] = 0x0;
- }
- passcnt = 0;
for (dlli = 0; dlli < 76; dlli++) {
- moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
- /* Wait DQSI latch phase calibration */
- moutdwm(ast, 0x1E6E0074, 0x00000010);
- moutdwm(ast, 0x1E6E0070, 0x00000003);
- do {
- data = mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
-
- moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
- data = cbr_scan2(ast);
- if (data != 0) {
- mask = 0x00010001;
- for (cnt = 0; cnt < 16; cnt++) {
- if (data & mask) {
- if (dllmin[cnt] > dlli) {
- dllmin[cnt] = dlli;
- }
- if (dllmax[cnt] < dlli) {
- dllmax[cnt] = dlli;
- }
- }
- mask <<= 1;
- }
- passcnt++;
- } else if (passcnt >= CBR_THRESHOLD2) {
- break;
- }
+ tag[0][dlli] = 0x0;
+ tag[1][dlli] = 0x0;
}
- gold_sadj[0] = 0x0;
- gold_sadj[1] = 0xFF;
- for (cnt = 0; cnt < 8; cnt++) {
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- if (gold_sadj[0] < dllmin[cnt]) {
- gold_sadj[0] = dllmin[cnt];
- }
- if (gold_sadj[1] > dllmax[cnt]) {
- gold_sadj[1] = dllmax[cnt];
- }
- }
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ pass[dqidly][0][0] = 0xff;
+ pass[dqidly][0][1] = 0x0;
+ pass[dqidly][1][0] = 0xff;
+ pass[dqidly][1][1] = 0x0;
}
- gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
- gold_sadj[1] = mindwm(ast, 0x1E6E0080);
-
- data = 0;
- for (cnt = 0; cnt < 8; cnt++) {
- data >>= 3;
- data2 = gold_sadj[1] & 0x7;
- gold_sadj[1] >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
- if (gold_sadj[0] >= dlli) {
- dlli = (gold_sadj[0] - dlli) >> 1;
- if (dlli > 0) {
- dlli = 1;
- }
- if (data2 != 3) {
- data2 = (data2 + dlli) & 0x7;
- }
- } else {
- dlli = (dlli - gold_sadj[0]) >> 1;
- if (dlli > 0) {
- dlli = 1;
- }
- if (data2 != 4) {
- data2 = (data2 - dlli) & 0x7;
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ passcnt[0] = passcnt[1] = 0;
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ ast_moutdwm(ast, 0x1E6E000C, 0);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
+ ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
+ for (dlli = 0; dlli < 76; dlli++) {
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0070, 0);
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
+ if (cbr_scan3(ast)) {
+ if (dlli == 0)
+ break;
+ passcnt[dqsip]++;
+ tag[dqsip][dlli] = 'P';
+ if (dlli < pass[dqidly][dqsip][0])
+ pass[dqidly][dqsip][0] = (u16) dlli;
+ if (dlli > pass[dqidly][dqsip][1])
+ pass[dqidly][dqsip][1] = (u16) dlli;
+ } else if (passcnt[dqsip] >= 5)
+ break;
+ else {
+ pass[dqidly][dqsip][0] = 0xff;
+ pass[dqidly][dqsip][1] = 0x0;
}
}
}
- data |= data2 << 21;
- }
- moutdwm(ast, 0x1E6E0080, data);
-
- gold_sadj[0] = 0x0;
- gold_sadj[1] = 0xFF;
- for (cnt = 8; cnt < 16; cnt++) {
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- if (gold_sadj[0] < dllmin[cnt]) {
- gold_sadj[0] = dllmin[cnt];
- }
- if (gold_sadj[1] > dllmax[cnt]) {
- gold_sadj[1] = dllmax[cnt];
- }
- }
+ if (passcnt[0] == 0 && passcnt[1] == 0)
+ dqidly++;
}
- gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
- gold_sadj[1] = mindwm(ast, 0x1E6E0084);
-
- data = 0;
- for (cnt = 8; cnt < 16; cnt++) {
- data >>= 3;
- data2 = gold_sadj[1] & 0x7;
- gold_sadj[1] >>= 3;
- if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
- dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
- if (gold_sadj[0] >= dlli) {
- dlli = (gold_sadj[0] - dlli) >> 1;
- if (dlli > 0) {
- dlli = 1;
- }
- if (data2 != 3) {
- data2 = (data2 + dlli) & 0x7;
- }
- } else {
- dlli = (dlli - gold_sadj[0]) >> 1;
- if (dlli > 0) {
- dlli = 1;
- }
- if (data2 != 4) {
- data2 = (data2 - dlli) & 0x7;
- }
+ /* Search margin */
+ g_dqidly = g_dqsip = g_margin = g_side = 0;
+
+ for (dqidly = 0; dqidly < 32; dqidly++) {
+ for (dqsip = 0; dqsip < 2; dqsip++) {
+ if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
+ continue;
+ diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
+ if ((diff+2) < g_margin)
+ continue;
+ passcnt[0] = passcnt[1] = 0;
+ for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++);
+ for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++);
+ if (passcnt[0] > passcnt[1])
+ passcnt[0] = passcnt[1];
+ passcnt[1] = 0;
+ if (passcnt[0] > g_side)
+ passcnt[1] = passcnt[0] - g_side;
+ if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
+ } else if (passcnt[1] > 1 && g_side < 8) {
+ if (diff > g_margin)
+ g_margin = diff;
+ g_dqidly = dqidly;
+ g_dqsip = dqsip;
+ g_side = passcnt[0];
}
}
- data |= data2 << 21;
}
- moutdwm(ast, 0x1E6E0084, data);
-
-} /* finetuneDQI_L2 */
+ reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
+ ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
-static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+}
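
The winning (dqidly, dqsip) pair found by the margin search is merged back into MCR18 with the same shifts used during the scan. Reading the bit positions off the code above rather than a datasheet, the packing amounts to:

	static u32 pack_mcr18(u32 mcr18, u32 dqidly, u32 dqsip)
	{
		/* keep bits [15:0]; DQ input delay at bit 16, DQS phase at bit 23 */
		return (mcr18 & 0x0000ffff) | (dqidly << 16) | (dqsip << 23);
	}
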
+static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
{
- u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+ u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
+ bool status = false;
-
- finetuneDQI_L(ast, param);
- finetuneDQI_L2(ast, param);
+ finetuneDQSI(ast);
+ if (finetuneDQI_L(ast, param) == false)
+ return status;
CBR_START2:
dllmin[0] = dllmin[1] = 0xff;
dllmax[0] = dllmax[1] = 0x0;
passcnt = 0;
for (dlli = 0; dlli < 76; dlli++) {
- moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
- /* Wait DQSI latch phase calibration */
- moutdwm(ast, 0x1E6E0074, 0x00000010);
- moutdwm(ast, 0x1E6E0070, 0x00000003);
- do {
- data = mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
-
- moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
data = cbr_scan(ast);
if (data != 0) {
if (data & 0x1) {
@@ -957,44 +844,31 @@ CBR_START2:
break;
}
}
+ if (retry++ > 10)
+ goto CBR_DONE2;
if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
goto CBR_START2;
}
if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
goto CBR_START2;
}
+ status = true;
+CBR_DONE2:
dlli = (dllmin[1] + dllmax[1]) >> 1;
dlli <<= 8;
dlli += (dllmin[0] + dllmax[0]) >> 1;
- moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
-
- data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
- data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
- moutdwm(ast, 0x1E6E0018, data2);
- moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
-
- /* Wait DQSI latch phase calibration */
- moutdwm(ast, 0x1E6E0074, 0x00000010);
- moutdwm(ast, 0x1E6E0070, 0x00000003);
- do {
- data = mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
- moutdwm(ast, 0x1E6E0070, 0x00000003);
- do {
- data = mindwm(ast, 0x1E6E0070);
- } while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
+ return status;
} /* CBRDLL2 */
static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
- moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
/* Get trap info */
- trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
trap_AC2 = 0x00020000 + (trap << 16);
trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
trap_MRS = 0x00000010 + (trap << 4);
@@ -1008,22 +882,35 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
switch (param->dram_freq) {
case 336:
- moutdwm(ast, 0x1E6E2020, 0x0190);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
param->wodt = 0;
param->reg_AC1 = 0x22202725;
param->reg_AC2 = 0xAA007613 | trap_AC2;
param->reg_DQSIC = 0x000000BA;
param->reg_MRS = 0x04001400 | trap_MRS;
param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000034;
+ param->reg_IOZ = 0x00000023;
param->reg_DQIDLY = 0x00000074;
param->reg_FREQ = 0x00004DC0;
param->madj_max = 96;
param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA00761C | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA007636 | trap_AC2;
+ break;
+ }
break;
default:
case 396:
- moutdwm(ast, 0x1E6E2020, 0x03F1);
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
param->wodt = 1;
param->reg_AC1 = 0x33302825;
param->reg_AC2 = 0xCC009617 | trap_AC2;
@@ -1033,7 +920,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->reg_IOZ = 0x00000034;
param->reg_DRV = 0x000000FA;
param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
+ param->reg_FREQ = 0x00005040;
param->madj_max = 96;
param->dll2_finetune_step = 4;
@@ -1053,14 +940,14 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
break;
case 408:
- moutdwm(ast, 0x1E6E2020, 0x01F0);
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
param->wodt = 1;
param->reg_AC1 = 0x33302825;
param->reg_AC2 = 0xCC009617 | trap_AC2;
param->reg_DQSIC = 0x000000E2;
param->reg_MRS = 0x04001600 | trap_MRS;
param->reg_EMRS = 0x00000000;
- param->reg_IOZ = 0x00000034;
+ param->reg_IOZ = 0x00000023;
param->reg_DRV = 0x000000FA;
param->reg_DQIDLY = 0x00000089;
param->reg_FREQ = 0x000050C0;
@@ -1083,7 +970,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
break;
case 456:
- moutdwm(ast, 0x1E6E2020, 0x0230);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
param->wodt = 0;
param->reg_AC1 = 0x33302926;
param->reg_AC2 = 0xCD44961A;
@@ -1097,7 +984,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 4;
break;
case 504:
- moutdwm(ast, 0x1E6E2020, 0x0270);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0270);
param->wodt = 1;
param->reg_AC1 = 0x33302926;
param->reg_AC2 = 0xDE44A61D;
@@ -1111,7 +998,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 4;
break;
case 528:
- moutdwm(ast, 0x1E6E2020, 0x0290);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0290);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302926;
@@ -1127,7 +1014,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 576:
- moutdwm(ast, 0x1E6E2020, 0x0140);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
param->reg_MADJ = 0x00136868;
param->reg_SADJ = 0x00004534;
param->wodt = 1;
@@ -1145,7 +1032,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 600:
- moutdwm(ast, 0x1E6E2020, 0x02E1);
+ ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
param->reg_MADJ = 0x00136868;
param->reg_SADJ = 0x00004534;
param->wodt = 1;
@@ -1163,7 +1050,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 624:
- moutdwm(ast, 0x1E6E2020, 0x0160);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0160);
param->reg_MADJ = 0x00136868;
param->reg_SADJ = 0x00004534;
param->wodt = 1;
@@ -1196,7 +1083,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
case AST_DRAM_4Gx16:
param->dram_config = 0x133;
break;
- }; /* switch size */
+ } /* switch size */
switch (param->vram_size) {
default:
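
The one-character `}; /* switch size */` fixes here and again after the DDR2 table below remove a stray empty statement: a semicolon after a switch's closing brace is legal C but a null statement that static checkers flag. For instance:

	switch (x) {
	default:
		break;
	};	/* <- the semicolon is a stray empty statement */
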
@@ -1218,106 +1105,98 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
{
- u32 data, data2;
+ u32 data, data2, retry = 0;
- moutdwm(ast, 0x1E6E0000, 0xFC600309);
- moutdwm(ast, 0x1E6E0018, 0x00000100);
- moutdwm(ast, 0x1E6E0024, 0x00000000);
- moutdwm(ast, 0x1E6E0034, 0x00000000);
+ddr3_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
udelay(10);
- moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
udelay(10);
- moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
udelay(10);
- moutdwm(ast, 0x1E6E0004, param->dram_config);
- moutdwm(ast, 0x1E6E0008, 0x90040f);
- moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- moutdwm(ast, 0x1E6E0080, 0x00000000);
- moutdwm(ast, 0x1E6E0084, 0x00000000);
- moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- moutdwm(ast, 0x1E6E0018, 0x4040A170);
- moutdwm(ast, 0x1E6E0018, 0x20402370);
- moutdwm(ast, 0x1E6E0038, 0x00000000);
- moutdwm(ast, 0x1E6E0040, 0xFF444444);
- moutdwm(ast, 0x1E6E0044, 0x22222222);
- moutdwm(ast, 0x1E6E0048, 0x22222222);
- moutdwm(ast, 0x1E6E004C, 0x00000002);
- moutdwm(ast, 0x1E6E0050, 0x80000000);
- moutdwm(ast, 0x1E6E0050, 0x00000000);
- moutdwm(ast, 0x1E6E0054, 0);
- moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- moutdwm(ast, 0x1E6E0070, 0x00000000);
- moutdwm(ast, 0x1E6E0074, 0x00000000);
- moutdwm(ast, 0x1E6E0078, 0x00000000);
- moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
/* Wait MCLK2X lock to MCLK */
do {
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
- moutdwm(ast, 0x1E6E0034, 0x00000001);
- moutdwm(ast, 0x1E6E000C, 0x00005C04);
- udelay(10);
- moutdwm(ast, 0x1E6E000C, 0x00000000);
- moutdwm(ast, 0x1E6E0034, 0x00000000);
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
if ((data2 & 0xff) > param->madj_max) {
break;
}
- moutdwm(ast, 0x1E6E0064, data2);
+ ast_moutdwm(ast, 0x1E6E0064, data2);
if (data2 & 0x00100000) {
data2 = ((data2 & 0xff) >> 3) + 3;
} else {
data2 = ((data2 & 0xff) >> 2) + 5;
}
- data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
data2 += data & 0xff;
data = data | (data2 << 8);
- moutdwm(ast, 0x1E6E0068, data);
+ ast_moutdwm(ast, 0x1E6E0068, data);
udelay(10);
- moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
udelay(10);
- data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- moutdwm(ast, 0x1E6E0018, data);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
data = data | 0x200;
- moutdwm(ast, 0x1E6E0018, data);
+ ast_moutdwm(ast, 0x1E6E0018, data);
do {
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
- moutdwm(ast, 0x1E6E0034, 0x00000001);
- moutdwm(ast, 0x1E6E000C, 0x00005C04);
- udelay(10);
- moutdwm(ast, 0x1E6E000C, 0x00000000);
- moutdwm(ast, 0x1E6E0034, 0x00000000);
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
}
- data = mindwm(ast, 0x1E6E0018) | 0xC00;
- moutdwm(ast, 0x1E6E0018, data);
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
- moutdwm(ast, 0x1E6E0034, 0x00000001);
- moutdwm(ast, 0x1E6E000C, 0x00000040);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
udelay(50);
/* Mode Register Setting */
- moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- moutdwm(ast, 0x1E6E0028, 0x00000005);
- moutdwm(ast, 0x1E6E0028, 0x00000007);
- moutdwm(ast, 0x1E6E0028, 0x00000003);
- moutdwm(ast, 0x1E6E0028, 0x00000001);
- moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- moutdwm(ast, 0x1E6E000C, 0x00005C08);
- moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
data = 0;
if (param->wodt) {
data = 0x300;
@@ -1325,30 +1204,23 @@ static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
if (param->rodt) {
data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
}
- moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
- /* Wait DQI delay lock */
- do {
- data = mindwm(ast, 0x1E6E0080);
- } while (!(data & 0x40000000));
- /* Wait DQSI delay lock */
- do {
- data = mindwm(ast, 0x1E6E0020);
- } while (!(data & 0x00000800));
/* Calibrate the DQSI delay */
- cbr_dll2(ast, param);
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr3_init_start;
- moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
/* ECC Memory Initialization */
#ifdef ECC
- moutdwm(ast, 0x1E6E007C, 0x00000000);
- moutdwm(ast, 0x1E6E0070, 0x221);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
do {
- data = mindwm(ast, 0x1E6E0070);
+ data = ast_mindwm(ast, 0x1E6E0070);
} while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
- moutdwm(ast, 0x1E6E0050, 0x80000000);
- moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
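
ddr3_init() (and ddr2_init() below) now wrap the whole register-programming sequence in a bounded restart: a failed DQSI calibration re-runs init from the top, at most ten times, replacing the old open-ended busy-waits on the DQI/DQSI lock bits. A sketch of that control flow, with init_once() and calibrate() as hypothetical stand-ins:

	static void dram_init(struct ast_private *ast)
	{
		u32 retry = 0;

	restart:
		init_once(ast);			/* program timings, wait for locks */
		if (!calibrate(ast) && retry++ < 10)
			goto restart;		/* full re-init, bounded */
	}
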
@@ -1358,10 +1230,10 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
{
u32 trap, trap_AC2, trap_MRS;
- moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+ ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
/* Get trap info */
- trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
trap_AC2 = (trap << 20) | (trap << 16);
trap_AC2 += 0x00110000;
trap_MRS = 0x00000040 | (trap << 4);
@@ -1375,7 +1247,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
switch (param->dram_freq) {
case 264:
- moutdwm(ast, 0x1E6E2020, 0x0130);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0130);
param->wodt = 0;
param->reg_AC1 = 0x11101513;
param->reg_AC2 = 0x78117011;
@@ -1390,7 +1262,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 336:
- moutdwm(ast, 0x1E6E2020, 0x0190);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0190);
param->wodt = 1;
param->reg_AC1 = 0x22202613;
param->reg_AC2 = 0xAA009016 | trap_AC2;
@@ -1403,10 +1275,25 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->reg_FREQ = 0x00004DC0;
param->madj_max = 96;
param->dll2_finetune_step = 3;
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xAA009012 | trap_AC2;
+ break;
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xAA009023 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xAA00903B | trap_AC2;
+ break;
+ }
break;
default:
case 396:
- moutdwm(ast, 0x1E6E2020, 0x03F1);
+ ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
param->wodt = 1;
param->rodt = 0;
param->reg_AC1 = 0x33302714;
@@ -1417,7 +1304,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->reg_DRV = 0x000000FA;
param->reg_IOZ = 0x00000034;
param->reg_DQIDLY = 0x00000089;
- param->reg_FREQ = 0x000050C0;
+ param->reg_FREQ = 0x00005040;
param->madj_max = 96;
param->dll2_finetune_step = 4;
@@ -1440,7 +1327,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
break;
case 408:
- moutdwm(ast, 0x1E6E2020, 0x01F0);
+ ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
param->wodt = 1;
param->rodt = 0;
param->reg_AC1 = 0x33302714;
@@ -1473,7 +1360,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
break;
case 456:
- moutdwm(ast, 0x1E6E2020, 0x0230);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0230);
param->wodt = 0;
param->reg_AC1 = 0x33302815;
param->reg_AC2 = 0xCD44B01E;
@@ -1488,7 +1375,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 504:
- moutdwm(ast, 0x1E6E2020, 0x0261);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0261);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302815;
@@ -1504,7 +1391,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 528:
- moutdwm(ast, 0x1E6E2020, 0x0120);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0120);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x33302815;
@@ -1520,7 +1407,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 552:
- moutdwm(ast, 0x1E6E2020, 0x02A1);
+ ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x43402915;
@@ -1536,7 +1423,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
param->dll2_finetune_step = 3;
break;
case 576:
- moutdwm(ast, 0x1E6E2020, 0x0140);
+ ast_moutdwm(ast, 0x1E6E2020, 0x0140);
param->wodt = 1;
param->rodt = 1;
param->reg_AC1 = 0x43402915;
@@ -1567,7 +1454,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
case AST_DRAM_4Gx16:
param->dram_config = 0x123;
break;
- }; /* switch size */
+ } /* switch size */
switch (param->vram_size) {
default:
@@ -1588,110 +1475,102 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
{
- u32 data, data2;
-
- moutdwm(ast, 0x1E6E0000, 0xFC600309);
- moutdwm(ast, 0x1E6E0018, 0x00000100);
- moutdwm(ast, 0x1E6E0024, 0x00000000);
- moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
- moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ u32 data, data2, retry = 0;
+
+ddr2_init_start:
+ ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
+ ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
udelay(10);
- moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
udelay(10);
- moutdwm(ast, 0x1E6E0004, param->dram_config);
- moutdwm(ast, 0x1E6E0008, 0x90040f);
- moutdwm(ast, 0x1E6E0010, param->reg_AC1);
- moutdwm(ast, 0x1E6E0014, param->reg_AC2);
- moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
- moutdwm(ast, 0x1E6E0080, 0x00000000);
- moutdwm(ast, 0x1E6E0084, 0x00000000);
- moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
- moutdwm(ast, 0x1E6E0018, 0x4040A130);
- moutdwm(ast, 0x1E6E0018, 0x20402330);
- moutdwm(ast, 0x1E6E0038, 0x00000000);
- moutdwm(ast, 0x1E6E0040, 0xFF808000);
- moutdwm(ast, 0x1E6E0044, 0x88848466);
- moutdwm(ast, 0x1E6E0048, 0x44440008);
- moutdwm(ast, 0x1E6E004C, 0x00000000);
- moutdwm(ast, 0x1E6E0050, 0x80000000);
- moutdwm(ast, 0x1E6E0050, 0x00000000);
- moutdwm(ast, 0x1E6E0054, 0);
- moutdwm(ast, 0x1E6E0060, param->reg_DRV);
- moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
- moutdwm(ast, 0x1E6E0070, 0x00000000);
- moutdwm(ast, 0x1E6E0074, 0x00000000);
- moutdwm(ast, 0x1E6E0078, 0x00000000);
- moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
+ ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
+ ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
+ ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
+ ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
+ ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
+ ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0054, 0);
+ ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
/* Wait MCLK2X lock to MCLK */
do {
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
- moutdwm(ast, 0x1E6E0034, 0x00000001);
- moutdwm(ast, 0x1E6E000C, 0x00005C04);
- udelay(10);
- moutdwm(ast, 0x1E6E000C, 0x00000000);
- moutdwm(ast, 0x1E6E0034, 0x00000000);
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
- data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
if ((data2 & 0xff) > param->madj_max) {
break;
}
- moutdwm(ast, 0x1E6E0064, data2);
+ ast_moutdwm(ast, 0x1E6E0064, data2);
if (data2 & 0x00100000) {
data2 = ((data2 & 0xff) >> 3) + 3;
} else {
data2 = ((data2 & 0xff) >> 2) + 5;
}
- data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
data2 += data & 0xff;
data = data | (data2 << 8);
- moutdwm(ast, 0x1E6E0068, data);
+ ast_moutdwm(ast, 0x1E6E0068, data);
udelay(10);
- moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
udelay(10);
- data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
- moutdwm(ast, 0x1E6E0018, data);
+ data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ ast_moutdwm(ast, 0x1E6E0018, data);
data = data | 0x200;
- moutdwm(ast, 0x1E6E0018, data);
+ ast_moutdwm(ast, 0x1E6E0018, data);
do {
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
} while (!(data & 0x08000000));
- moutdwm(ast, 0x1E6E0034, 0x00000001);
- moutdwm(ast, 0x1E6E000C, 0x00005C04);
- udelay(10);
- moutdwm(ast, 0x1E6E000C, 0x00000000);
- moutdwm(ast, 0x1E6E0034, 0x00000000);
- data = mindwm(ast, 0x1E6E001C);
+ data = ast_mindwm(ast, 0x1E6E001C);
data = (data >> 8) & 0xff;
}
- data = mindwm(ast, 0x1E6E0018) | 0xC00;
- moutdwm(ast, 0x1E6E0018, data);
+ ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
+ data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
+ ast_moutdwm(ast, 0x1E6E0018, data);
- moutdwm(ast, 0x1E6E0034, 0x00000001);
- moutdwm(ast, 0x1E6E000C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
udelay(50);
/* Mode Register Setting */
- moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
- moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- moutdwm(ast, 0x1E6E0028, 0x00000005);
- moutdwm(ast, 0x1E6E0028, 0x00000007);
- moutdwm(ast, 0x1E6E0028, 0x00000003);
- moutdwm(ast, 0x1E6E0028, 0x00000001);
-
- moutdwm(ast, 0x1E6E000C, 0x00005C08);
- moutdwm(ast, 0x1E6E002C, param->reg_MRS);
- moutdwm(ast, 0x1E6E0028, 0x00000001);
- moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
- moutdwm(ast, 0x1E6E0028, 0x00000003);
- moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
- moutdwm(ast, 0x1E6E0028, 0x00000003);
-
- moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+ ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
data = 0;
if (param->wodt) {
data = 0x500;
@@ -1699,30 +1578,23 @@ static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
if (param->rodt) {
data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
}
- moutdwm(ast, 0x1E6E0034, data | 0x3);
- moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
+ ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
- /* Wait DQI delay lock */
- do {
- data = mindwm(ast, 0x1E6E0080);
- } while (!(data & 0x40000000));
- /* Wait DQSI delay lock */
- do {
- data = mindwm(ast, 0x1E6E0020);
- } while (!(data & 0x00000800));
/* Calibrate the DQSI delay */
- cbr_dll2(ast, param);
+ if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
+ goto ddr2_init_start;
/* ECC Memory Initialization */
#ifdef ECC
- moutdwm(ast, 0x1E6E007C, 0x00000000);
- moutdwm(ast, 0x1E6E0070, 0x221);
+ ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x221);
do {
- data = mindwm(ast, 0x1E6E0070);
+ data = ast_mindwm(ast, 0x1E6E0070);
} while (!(data & 0x00001000));
- moutdwm(ast, 0x1E6E0070, 0x00000000);
- moutdwm(ast, 0x1E6E0050, 0x80000000);
- moutdwm(ast, 0x1E6E0050, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
+ ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}
@@ -1768,8 +1640,8 @@ static void ast_init_dram_2300(struct drm_device *dev)
ddr2_init(ast, &param);
}
- temp = mindwm(ast, 0x1e6e2040);
- moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ temp = ast_mindwm(ast, 0x1e6e2040);
+ ast_moutdwm(ast, 0x1e6e2040, temp | 0x40);
}
/* wait ready */
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 95fa6aba26bc..4c761dcea972 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -42,7 +42,7 @@
#define HBorder 0x00000020
#define VBorder 0x00000010
#define WideScreenMode 0x00000100
-
+#define NewModeInfo 0x00000200
/* DCLK Index */
#define VCLK25_175 0x00
@@ -67,6 +67,11 @@
#define VCLK106_5 0x12
#define VCLK146_25 0x13
#define VCLK148_5 0x14
+#define VCLK71 0x15
+#define VCLK88_75 0x16
+#define VCLK119 0x17
+#define VCLK85_5 0x18
+#define VCLK97_75 0x19
static struct ast_vbios_dclk_info dclk_table[] = {
{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
@@ -90,6 +95,10 @@ static struct ast_vbios_dclk_info dclk_table[] = {
{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x77, 0x58, 0x80}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
};
static struct ast_vbios_stdtable vbios_stdtable[] = {
@@ -225,41 +234,63 @@ static struct ast_vbios_enhtable res_1600x1200[] = {
(SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
};
-static struct ast_vbios_enhtable res_1920x1200[] = {
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
- (SyncNP | Charx8Dot), 60, 1, 0x34 },
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
- (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+/* 16:9 */
+static struct ast_vbios_enhtable res_1360x768[] = {
+ {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
+ {1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 },
+};
+
+static struct ast_vbios_enhtable res_1600x900[] = {
+ {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A },
+ {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x3A }
};
+static struct ast_vbios_enhtable res_1920x1080[] = {
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 },
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 },
+};
+
+
/* 16:10 */
static struct ast_vbios_enhtable res_1280x800[] = {
+ {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x35 },
};
static struct ast_vbios_enhtable res_1440x900[] = {
+ {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x36 },
};
static struct ast_vbios_enhtable res_1680x1050[] = {
+ {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x37 },
};
-/* HDTV */
-static struct ast_vbios_enhtable res_1920x1080[] = {
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+static struct ast_vbios_enhtable res_1920x1200[] = {
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 },
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 },
};
+
#endif
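
In the three 16:10 tables the new reduced-blanking rows are inserted ahead of the full-blanking ones, so a first-match scan at 60Hz now prefers the RB timing, while the 0xFF row stays last as the catch-all. A sketch of such a scan (the refresh_rate field name and the 0xFF-as-terminator convention are read off the tables here, not from the header):

	static const struct ast_vbios_enhtable *
	pick_entry(const struct ast_vbios_enhtable *tbl, u8 refresh)
	{
		while (tbl->refresh_rate != 0xFF) {
			if (tbl->refresh_rate == refresh)
				return tbl;	/* first match wins, so RB rows go first */
			tbl++;
		}
		return tbl;			/* terminator doubles as default */
	}
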
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index f488be55d650..b9a695d92792 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -434,17 +434,13 @@ static void bochs_bo_unref(struct bochs_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
-
+ *bo = NULL;
}
void bochs_gem_free_object(struct drm_gem_object *obj)
{
struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
- if (!bochs_bo)
- return;
bochs_bo_unref(&bochs_bo);
}
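
The dropped checks in bochs (and in cirrus below) were dead code on both counts: gem_to_bochs_bo() is a container_of() over a valid object, and ttm_bo_unref() clears the caller's pointer unconditionally before dropping the reference, so `tbo` is always NULL afterwards. Roughly, as a simplified sketch recalled from ttm_bo.c:

	void ttm_bo_unref(struct ttm_buffer_object **p_bo)
	{
		struct ttm_buffer_object *bo = *p_bo;

		*p_bo = NULL;			/* always cleared, hence the dead check */
		kref_put(&bo->kref, ttm_bo_release);
	}
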
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index b171901a3553..98fd17ae4916 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -225,12 +225,6 @@ out:
return num_modes;
}
-static int ptn3460_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge;
@@ -242,7 +236,6 @@ struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
- .mode_valid = ptn3460_mode_valid,
.best_encoder = ptn3460_best_encoder,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 4b0170cf53fd..99c1983f99d2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -264,17 +264,13 @@ static void cirrus_bo_unref(struct cirrus_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
-
+ *bo = NULL;
}
void cirrus_gem_free_object(struct drm_gem_object *obj)
{
struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
- if (!cirrus_bo)
- return;
cirrus_bo_unref(&cirrus_bo);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index f59433b7610c..49332c5fe35b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -505,13 +505,6 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
return count;
}
-static int cirrus_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* Any mode we've added is valid */
- return MODE_OK;
-}
-
static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
@@ -546,7 +539,6 @@ static void cirrus_connector_destroy(struct drm_connector *connector)
struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
.get_modes = cirrus_vga_get_modes,
- .mode_valid = cirrus_vga_mode_valid,
.best_encoder = cirrus_connector_best_encoder,
};
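
Dropping the stub .mode_valid callbacks in ptn3460 and cirrus is safe because the probe helper only consults the callback when one is provided, treating its absence as "every mode is valid". The core-side check is shaped roughly like this (paraphrased, not the helper's exact code):

	enum drm_mode_status status = MODE_OK;

	if (connector_funcs->mode_valid)
		status = connector_funcs->mode_valid(connector, mode);
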
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index edec31fe3fed..68175b54504b 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -363,7 +363,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
list->master = dev->primary->master;
*maplist = list;
return 0;
- }
+}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
* \param arg pointer to a drm_buf_info structure.
* \return zero on success or a negative number on failure.
*
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
* lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
if (!dma)
return -EINVAL;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
if (!dma)
return -EINVAL;
- spin_lock(&dev->count_lock);
+ spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
return -EBUSY;
}
dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->count_lock);
+ spin_unlock(&dev->buf_lock);
if (request->count >= dma->buf_count) {
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 534cb89b160d..a6b690626a6b 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -131,14 +131,14 @@ drm_clflush_sg(struct sg_table *st)
EXPORT_SYMBOL(drm_clflush_sg);
void
-drm_clflush_virt_range(char *addr, unsigned long length)
+drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
- char *end = addr + length;
+ void *end = addr + length;
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
- clflush(addr);
+ clflushopt(addr);
clflushopt(end - 1);
mb();
return;
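
Two things change in drm_clflush_virt_range(): the range becomes void *, and the loop body now uses clflushopt like the trailing flush. That trailing clflushopt(end - 1) matters for unaligned ranges, since stepping from an unaligned addr in cacheline strides can stop short of the line holding the final byte. A sketch of the invariant, assuming a 64-byte line and a stand-in flush_line() for the instruction:

	static void flush_range(void *addr, unsigned long length)
	{
		const unsigned long line = 64;		/* assumed cacheline size */
		void *end = (char *)addr + length;

		while (addr < end) {
			flush_line(addr);		/* stand-in for clflushopt() */
			addr = (char *)addr + line;
		}
		flush_line((char *)end - 1);		/* addr=60, len=8: the loop only
							 * flushes the 0..63 line; this
							 * catches 64..127, where byte 67
							 * (end - 1) lives */
	}
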
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d8b7099abece..fe94cc10cd35 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_lock.h>
#include "drm_crtc_internal.h"
@@ -50,12 +51,42 @@
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
- struct drm_crtc *crtc;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx;
+ int ret;
- mutex_lock(&dev->mode_config.mutex);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (WARN_ON(!ctx))
+ return;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+ mutex_lock(&config->mutex);
+
+ drm_modeset_acquire_init(ctx, 0);
+
+retry:
+ ret = drm_modeset_lock(&config->connection_mutex, ctx);
+ if (ret)
+ goto fail;
+ ret = drm_modeset_lock_all_crtcs(dev, ctx);
+ if (ret)
+ goto fail;
+
+ WARN_ON(config->acquire_ctx);
+
+ /* now we hold the locks, so it is safe to stash the
+ * ctx for drm_modeset_unlock_all():
+ */
+ config->acquire_ctx = ctx;
+
+ drm_warn_on_modeset_not_all_locked(dev);
+
+ return;
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
}
EXPORT_SYMBOL(drm_modeset_lock_all);
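
drm_modeset_lock_all() is now a client of the wound/wait acquire-context API: every lock attempt carries a ctx, and -EDEADLK means "drop everything you hold, block on the contended lock, then retry", which is exactly what the fail/retry labels above implement. The caller-side pattern, sketched against the API names used in this hunk:

	static int lock_crtc_ww(struct drm_crtc *crtc)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);	/* drop held locks, wait */
			goto retry;			/* retry pass cannot deadlock */
		}
		/* ... modeset work under the lock ... */
		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}
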
@@ -67,10 +98,17 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
- struct drm_crtc *crtc;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- mutex_unlock(&crtc->mutex);
+ if (WARN_ON(!ctx))
+ return;
+
+ config->acquire_ctx = NULL;
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+
+ kfree(ctx);
mutex_unlock(&dev->mode_config.mutex);
}
@@ -91,8 +129,9 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- WARN_ON(!mutex_is_locked(&crtc->mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
@@ -227,6 +266,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
+ { DRM_MODE_ENCODER_DPMST, "DP MST" },
};
static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
@@ -256,46 +296,6 @@ void drm_connector_ida_destroy(void)
}
/**
- * drm_get_encoder_name - return a string for encoder
- * @encoder: encoder to compute name of
- *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
- */
-const char *drm_get_encoder_name(const struct drm_encoder *encoder)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_encoder_enum_list[encoder->encoder_type].name,
- encoder->base.id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_encoder_name);
-
-/**
- * drm_get_connector_name - return a string for connector
- * @connector: connector to compute name of
- *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
- */
-const char *drm_get_connector_name(const struct drm_connector *connector)
-{
- static char buf[32];
-
- snprintf(buf, 32, "%s-%d",
- drm_connector_enum_list[connector->connector_type].name,
- connector->connector_type_id);
- return buf;
-}
-EXPORT_SYMBOL(drm_get_connector_name);
-
-/**
* drm_get_connector_status_name - return a string for connector status
* @status: connector status to compute name of
*
@@ -409,6 +409,21 @@ void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
+static struct drm_mode_object *_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
+ (obj->id != id))
+ obj = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return obj;
+}
+
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
@@ -416,7 +431,9 @@ void drm_mode_object_put(struct drm_device *dev,
* @type: type of the mode object
*
* Note that framebuffers cannot be looked up with this function - since those
- * are reference counted, they need special treatment.
+ * are reference counted, they need special treatment. This holds even with
+ * DRM_MODE_OBJECT_ANY (although that case simply returns NULL
+ * rather than WARN_ON()).
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
@@ -426,13 +443,10 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
/* Framebuffers are reference counted and need their own lookup
* function. */
WARN_ON(type == DRM_MODE_OBJECT_FB);
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != type) || (obj->id != id))
+ obj = _object_find(dev, id, type);
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
-
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
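
Later hunks in this patch switch call sites from drm_mode_object_find() plus obj_to_*() casts to typed helpers (drm_crtc_find(), drm_connector_find(), drm_plane_find(), ...). Those helpers are presumably thin wrappers of this shape (a sketch, not the header's exact text):

	static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
						     uint32_t id)
	{
		struct drm_mode_object *mo;

		mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
		return mo ? obj_to_crtc(mo) : NULL;
	}
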
@@ -538,7 +552,7 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
@@ -551,7 +565,7 @@ EXPORT_SYMBOL(drm_framebuffer_unreference);
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
- DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);
@@ -563,7 +577,7 @@ static void drm_framebuffer_free_bug(struct kref *kref)
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
@@ -691,6 +705,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_remove);
+DEFINE_WW_CLASS(crtc_ww_class);
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -710,6 +726,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
void *cursor,
const struct drm_crtc_funcs *funcs)
{
+ struct drm_mode_config *config = &dev->mode_config;
int ret;
crtc->dev = dev;
@@ -717,8 +734,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->invert_dimensions = false;
drm_modeset_lock_all(dev);
- mutex_init(&crtc->mutex);
- mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+ drm_modeset_lock_init(&crtc->mutex);
+ /* dropped by _unlock_all(): */
+ drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@@ -726,8 +744,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->base.properties = &crtc->properties;
- list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
- dev->mode_config.num_crtc++;
+ list_add_tail(&crtc->head, &config->crtc_list);
+ config->num_crtc++;
crtc->primary = primary;
if (primary)
@@ -755,6 +773,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
+ drm_modeset_lock_fini(&crtc->mutex);
+
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
@@ -824,7 +844,7 @@ int drm_connector_init(struct drm_device *dev,
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
- goto out;
+ goto out_unlock;
connector->base.properties = &connector->properties;
connector->dev = dev;
@@ -834,9 +854,17 @@ int drm_connector_init(struct drm_device *dev,
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
- drm_mode_object_put(dev, &connector->base);
- goto out;
+ goto out_put;
+ }
+ connector->name =
+ kasprintf(GFP_KERNEL, "%s-%d",
+ drm_connector_enum_list[connector_type].name,
+ connector->connector_type_id);
+ if (!connector->name) {
+ ret = -ENOMEM;
+ goto out_put;
}
+
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
@@ -853,7 +881,11 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
- out:
+out_put:
+ if (ret)
+ drm_mode_object_put(dev, &connector->base);
+
+out_unlock:
drm_modeset_unlock_all(dev);
return ret;
@@ -881,6 +913,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->connector_type_id);
drm_mode_object_put(dev, &connector->base);
+ kfree(connector->name);
+ connector->name = NULL;
list_del(&connector->head);
dev->mode_config.num_connector--;
}
@@ -982,16 +1016,27 @@ int drm_encoder_init(struct drm_device *dev,
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
- goto out;
+ goto out_unlock;
encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
+ encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+ drm_encoder_enum_list[encoder_type].name,
+ encoder->base.id);
+ if (!encoder->name) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
- out:
+out_put:
+ if (ret)
+ drm_mode_object_put(dev, &encoder->base);
+
+out_unlock:
drm_modeset_unlock_all(dev);
return ret;
@@ -1009,6 +1054,8 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
+ kfree(encoder->name);
+ encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
@@ -1145,16 +1192,19 @@ EXPORT_SYMBOL(drm_plane_cleanup);
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
+ struct drm_framebuffer *old_fb = plane->fb;
int ret;
- if (!plane->fb)
+ if (!old_fb)
return;
ret = plane->funcs->disable_plane(plane);
- if (ret)
+ if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
+ return;
+ }
/* disconnect the plane from the fb and crtc: */
- __drm_framebuffer_unreference(plane->fb);
+ __drm_framebuffer_unreference(old_fb);
plane->fb = NULL;
plane->crtc = NULL;
}
@@ -1378,6 +1428,12 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
return 0;
}
+void drm_mode_group_destroy(struct drm_mode_group *group)
+{
+ kfree(group->id_list);
+ group->id_list = NULL;
+}
+
/*
* NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
* the drm core's responsibility to set up mode control groups.
@@ -1614,7 +1670,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
&dev->mode_config.encoder_list,
head) {
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
- drm_get_encoder_name(encoder));
+ encoder->name);
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@@ -1646,7 +1702,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
head) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@@ -1695,7 +1751,6 @@ int drm_mode_getcrtc(struct drm_device *dev,
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
- struct drm_mode_object *obj;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1703,13 +1758,11 @@ int drm_mode_getcrtc(struct drm_device *dev,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
@@ -1763,7 +1816,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
- struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_display_mode *mode;
int mode_count = 0;
@@ -1787,13 +1839,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
+ connector = drm_connector_find(dev, out_resp->connector_id);
+ if (!connector) {
ret = -ENOENT;
goto out;
}
- connector = obj_to_connector(obj);
props_count = connector->properties.count;
@@ -1821,10 +1871,12 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if (connector->encoder)
out_resp->encoder_id = connector->encoder->base.id;
else
out_resp->encoder_id = 0;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
* This ioctl is called twice, once to determine how much space is
@@ -1908,7 +1960,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int ret = 0;
@@ -1916,13 +1967,11 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
+ encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+ if (!encoder) {
ret = -ENOENT;
goto out;
}
- encoder = obj_to_encoder(obj);
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
@@ -2020,7 +2069,6 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
@@ -2029,13 +2077,11 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, plane_resp->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, plane_resp->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out;
}
- plane = obj_to_plane(obj);
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
@@ -2088,7 +2134,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
@@ -2103,35 +2148,42 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
- obj = drm_mode_object_find(dev, plane_req->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, plane_req->plane_id);
+ if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
- plane = obj_to_plane(obj);
/* No fb means shut it down */
if (!plane_req->fb_id) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
- plane->funcs->disable_plane(plane);
- plane->crtc = NULL;
- plane->fb = NULL;
+ ret = plane->funcs->disable_plane(plane);
+ if (!ret) {
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ } else {
+ old_fb = NULL;
+ }
drm_modeset_unlock_all(dev);
goto out;
}
- obj = drm_mode_object_find(dev, plane_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ ret = -EINVAL;
+ goto out;
+ }
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
if (!fb) {
@@ -2187,16 +2239,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
- old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
fb = NULL;
+ } else {
+ old_fb = NULL;
}
drm_modeset_unlock_all(dev);
@@ -2239,9 +2293,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
ret = crtc->funcs->set_config(set);
if (ret == 0) {
crtc->primary->crtc = crtc;
-
- /* crtc->fb must be updated by ->set_config, enforces this. */
- WARN_ON(fb != crtc->primary->fb);
+ crtc->primary->fb = fb;
}
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
@@ -2318,7 +2370,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
@@ -2336,14 +2387,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
return -ERANGE;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
@@ -2426,18 +2475,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
+ connector = drm_connector_find(dev, out_id);
+ if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -ENOENT;
goto out;
}
- connector = obj_to_connector(obj);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
connector_set[i] = connector;
}
@@ -2466,7 +2513,6 @@ static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
@@ -2476,14 +2522,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
@@ -2507,7 +2552,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
}
}
out:
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
return ret;
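
Passing a NULL acquire context to drm_modeset_lock() simply blocks, matching the mutex_lock() it replaces here. Callers that take several locks instead thread a drm_modeset_acquire_ctx through, so the ww-mutex machinery can back off on deadlock; the canonical idiom looks roughly like this (illustrative only, not part of this patch):

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
    retry:
    	ret = drm_modeset_lock(&crtc->mutex, &ctx);
    	if (ret == -EDEADLK) {
    		drm_modeset_backoff(&ctx);	/* drop held locks, wait, retry */
    		goto retry;
    	}

    	/* ... modify CRTC state ... */

    	drm_modeset_drop_locks(&ctx);
    	drm_modeset_acquire_fini(&ctx);
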
@@ -3097,6 +3142,8 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
if (!property)
return NULL;
+ property->dev = dev;
+
if (num_values) {
property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
if (!property->values)
@@ -3117,6 +3164,9 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
}
list_add_tail(&property->head, &dev->mode_config.property_list);
+
+ WARN_ON(!drm_property_type_valid(property));
+
return property;
fail:
kfree(property->values);
@@ -3217,6 +3267,22 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_property_create_bitmask);
+static struct drm_property *property_create_range(struct drm_device *dev,
+ int flags, const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
+
/**
* drm_property_create - create a new ranged property type
* @dev: drm device
@@ -3239,20 +3305,36 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags
const char *name,
uint64_t min, uint64_t max)
{
+ return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+ name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max)
+{
+ return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+ name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
+
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name, uint32_t type)
+{
struct drm_property *property;
- flags |= DRM_MODE_PROP_RANGE;
+ flags |= DRM_MODE_PROP_OBJECT;
- property = drm_property_create(dev, flags, name, 2);
+ property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
- property->values[0] = min;
- property->values[1] = max;
+ property->values[0] = type;
return property;
}
-EXPORT_SYMBOL(drm_property_create_range);
+EXPORT_SYMBOL(drm_property_create_object);
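
The two new constructors are used like the existing range type; signed min/max values travel through the I642U64()/U642I64() casts, and an object property records the expected object type in values[0]. A hedged usage sketch with invented property names:

    struct drm_property *zpos, *link;

    /* signed range: userspace sees [-128, 127] */
    zpos = drm_property_create_signed_range(dev, 0, "example-zpos", -128, 127);
    if (zpos)
    	drm_object_attach_property(&plane->base, zpos, I642U64(0));

    /* object property: only DRM_MODE_OBJECT_CRTC ids (or 0) will validate */
    link = drm_property_create_object(dev, 0, "example-crtc-link",
    				      DRM_MODE_OBJECT_CRTC);
    if (link)
    	drm_object_attach_property(&plane->base, link, 0);
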
/**
* drm_property_add_enum - add a possible value to an enumeration property
@@ -3274,14 +3356,16 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
- if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
return -EINVAL;
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
- if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+ if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+ (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@@ -3438,7 +3522,6 @@ EXPORT_SYMBOL(drm_object_property_get_value);
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
@@ -3457,17 +3540,17 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ property = drm_property_find(dev, out_resp->prop_id);
+ if (!property) {
ret = -ENOENT;
goto done;
}
- property = obj_to_property(obj);
- if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
@@ -3489,7 +3572,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
- if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3511,7 +3595,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = enum_count;
}
- if (property->flags & DRM_MODE_PROP_BLOB) {
+ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3590,7 +3674,6 @@ static void drm_property_destroy_blob(struct drm_device *dev,
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_object *obj;
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
@@ -3600,12 +3683,11 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
- if (!obj) {
+ blob = drm_property_blob_find(dev, out_resp->blob_id);
+ if (!blob) {
ret = -ENOENT;
goto done;
}
- blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
@@ -3667,19 +3749,40 @@ static bool drm_property_change_is_valid(struct drm_property *property,
{
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
- if (property->flags & DRM_MODE_PROP_RANGE) {
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
- } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+ int64_t svalue = U642I64(value);
+ if (svalue < U642I64(property->values[0]) ||
+ svalue > U642I64(property->values[1]))
+ return false;
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
int i;
uint64_t valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
/* Only the driver knows */
return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ struct drm_mode_object *obj;
+ /* a zero value for an object property translates to null: */
+ if (value == 0)
+ return true;
+ /*
+ * NOTE: use _object_find() directly to bypass restriction on
+ * looking up refcnt'd objects (ie. fb's). For a refcnt'd
+ * object this could race against object finalization, so it
+ * simply tells us that the object *was* valid. Which is good
+ * enough.
+ */
+ obj = _object_find(property->dev, value, property->values[0]);
+ return obj != NULL;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3987,7 +4090,6 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
@@ -3997,12 +4099,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
@@ -4061,7 +4162,6 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
@@ -4071,12 +4171,11 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
@@ -4129,7 +4228,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
@@ -4143,12 +4241,11 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj)
+ crtc = drm_crtc_find(dev, page_flip->crtc_id);
+ if (!crtc)
return -ENOENT;
- crtc = obj_to_crtc(obj);
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -4232,7 +4329,7 @@ out:
drm_framebuffer_unreference(fb);
if (old_fb)
drm_framebuffer_unreference(old_fb);
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
return ret;
}
@@ -4597,6 +4694,7 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
+ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
@@ -4696,5 +4794,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
idr_destroy(&dev->mode_config.crtc_idr);
+ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
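
The repeated drm_mode_object_find() + obj_to_*() pairs deleted throughout this file are folded into typed lookup helpers, presumably thin static inlines in drm_crtc.h of roughly this shape:

    static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
    					     uint32_t id)
    {
    	struct drm_mode_object *mo;

    	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
    	return mo ? obj_to_crtc(mo) : NULL;
    }

drm_connector_find(), drm_encoder_find(), drm_plane_find() and drm_property_find() follow the same pattern for their respective object types.
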
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 872ba11c4533..78b37f3febd3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -93,8 +93,10 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
- if (!oops_in_progress)
+ if (!oops_in_progress) {
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ }
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
@@ -153,20 +155,14 @@ drm_encoder_disable(struct drm_encoder *encoder)
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
- struct drm_connector *connector;
struct drm_crtc *crtc;
drm_warn_on_modeset_not_all_locked(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder)
- continue;
- }
-
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
- /* disconnector encoder from any connector */
+ /* disconnect encoder from any connector */
encoder->crtc = NULL;
}
}
@@ -349,7 +345,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
continue;
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.id, drm_get_encoder_name(encoder),
+ encoder->base.id, encoder->name,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -400,8 +396,7 @@ done:
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
-
-static int
+static void
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -430,7 +425,6 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
}
__drm_helper_disable_unused_functions(dev);
- return 0;
}
/**
@@ -481,7 +475,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- return drm_crtc_helper_disable(set->crtc);
+ drm_crtc_helper_disable(set->crtc);
+ return 0;
}
dev = set->crtc->dev;
@@ -620,11 +615,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
new_crtc->base.id);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
}
}
@@ -650,7 +645,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 4b6e6f3ba0a1..08e33b8b13a4 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -206,13 +206,17 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
* i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
* @adapter: i2c adapter to register
*
- * This registers an i2c adapater that uses dp aux channel as it's underlaying
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
* transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
* and store it in the algo_data member of the @adapter argument. This will be
* used by the i2c over dp aux algorithm to drive the hardware.
*
* RETURNS:
* 0 on success, -ERRNO on failure.
+ *
+ * IMPORTANT:
+ * This interface is deprecated; please switch to the new dp aux helpers and
+ * drm_dp_aux_register().
*/
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
@@ -378,7 +382,10 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
* transactions.
*/
for (retry = 0; retry < 7; retry++) {
+
+ mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
@@ -592,7 +599,9 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
+ mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, msg);
+ mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
@@ -725,13 +734,15 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
};
/**
- * drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
+ * drm_dp_aux_register() - initialise and register aux channel
* @aux: DisplayPort AUX channel
*
* Returns 0 on success or a negative error code on failure.
*/
-int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
+int drm_dp_aux_register(struct drm_dp_aux *aux)
{
+ mutex_init(&aux->hw_mutex);
+
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
@@ -746,14 +757,14 @@ int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
return i2c_add_adapter(&aux->ddc);
}
-EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus);
+EXPORT_SYMBOL(drm_dp_aux_register);
/**
- * drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
+ * drm_dp_aux_unregister() - unregister an AUX adapter
* @aux: DisplayPort AUX channel
*/
-void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux)
+void drm_dp_aux_unregister(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
-EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus);
+EXPORT_SYMBOL(drm_dp_aux_unregister);
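
For drivers the rename is mechanical; aux->hw_mutex is initialised inside drm_dp_aux_register(), so a user only fills in the channel before registering. A hedged sketch (the container and transfer hook are invented names):

    struct drm_dp_aux *aux = &my_dp->aux;	/* hypothetical container */

    aux->name = "DP-AUX A";
    aux->dev = dev->dev;
    aux->transfer = my_aux_transfer;	/* hw-specific transfer callback */

    ret = drm_dp_aux_register(aux);	/* was drm_dp_aux_register_i2c_bus() */
    if (ret)
    	return ret;
    /* ... and on teardown: */
    drm_dp_aux_unregister(aux);		/* was drm_dp_aux_unregister_i2c_bus() */
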
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 03711d00aaae..8218078b6133 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
retcode = -EFAULT;
goto err_i1;
}
- } else
+ } else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
+ }
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
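
IOC_IN and IOC_OUT are the standard ioctl direction bits, so the memset() is now skipped for commands that move no data back to userspace. For reference, from include/uapi/asm-generic/ioctl.h:

    #define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)	/* user -> kernel */
    #define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)	/* kernel -> user */
    #define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
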
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d4e3f9d9370f..dfa9769b26b5 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -70,6 +70,8 @@
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
+/* Force 12bpc */
+#define EDID_QUIRK_FORCE_12BPC (1 << 9)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -125,6 +127,9 @@ static struct edid_quirk {
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
+ { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
+
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
@@ -984,9 +989,13 @@ static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
- /*
- * Sanity check the header of the base EDID block. Return 8 if the header
- * is perfect, down to 0 if it's totally wrong.
+/**
+ * drm_edid_header_is_valid - sanity check the header of the base EDID block
+ * @raw_edid: pointer to raw base EDID block
+ *
+ * Sanity check the header of the base EDID block.
+ *
+ * Return: 8 if the header is perfect, down to 0 if it's totally wrong.
*/
int drm_edid_header_is_valid(const u8 *raw_edid)
{
@@ -1005,9 +1014,16 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
-/*
- * Sanity check the EDID block (base or extension). Return 0 if the block
- * doesn't check out, or 1 if it's valid.
+/**
+ * drm_edid_block_valid - Sanity check the EDID block (base or extension)
+ * @raw_edid: pointer to raw EDID block
+ * @block: type of block to validate (0 for base, extension otherwise)
+ * @print_bad_edid: if true, dump bad EDID blocks to the console
+ *
+ * Validate a base or extension EDID block and optionally dump bad blocks to
+ * the console.
+ *
+ * Return: True if the block is valid, false otherwise.
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
@@ -1077,6 +1093,8 @@ EXPORT_SYMBOL(drm_edid_block_valid);
* @edid: EDID data
*
* Sanity-check an entire EDID record (including extensions)
+ *
+ * Return: True if the EDID data is valid, false otherwise.
*/
bool drm_edid_is_valid(struct edid *edid)
{
@@ -1096,18 +1114,15 @@ EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_SEGMENT_ADDR 0x30
/**
- * Get EDID information via I2C.
- *
- * @adapter : i2c device adaptor
+ * drm_do_probe_ddc_edid() - get EDID information via I2C
+ * @adapter: I2C device adaptor
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
*
- * Returns:
- *
- * 0 on success or -1 on failure.
+ * Try to fetch EDID information by calling I2C driver functions.
*
- * Try to fetch EDID information by calling i2c driver function.
+ * Return: 0 on success or -1 on failure.
*/
static int
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
@@ -1118,7 +1133,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
- /* The core i2c driver will automatically retry the transfer if the
+ /*
+ * The core I2C driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
@@ -1144,10 +1160,10 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
}
};
- /*
- * Avoid sending the segment addr to not upset non-compliant ddc
- * monitors.
- */
+ /*
+ * Avoid sending the segment addr to not upset non-compliant
+ * DDC monitors.
+ */
ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
if (ret == -ENXIO) {
@@ -1216,7 +1232,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
- drm_get_connector_name(connector), j);
+ connector->name, j);
connector->bad_edid_counter++;
}
@@ -1236,7 +1252,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
carp:
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- drm_get_connector_name(connector), j);
+ connector->name, j);
}
connector->bad_edid_counter++;
@@ -1246,12 +1262,10 @@ out:
}
/**
- * Probe DDC presence.
- * @adapter: i2c adapter to probe
+ * drm_probe_ddc() - probe DDC presence
+ * @adapter: I2C adapter to probe
*
- * Returns:
- *
- * 1 on success
+ * Return: True on success, false on failure.
*/
bool
drm_probe_ddc(struct i2c_adapter *adapter)
@@ -1265,12 +1279,12 @@ EXPORT_SYMBOL(drm_probe_ddc);
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
+ * @adapter: I2C adapter to use for DDC
*
- * Poke the given i2c channel to grab EDID data if possible. If found,
+ * Poke the given I2C channel to grab EDID data if possible. If found,
* attach it to the connector.
*
- * Return edid data or NULL if we couldn't find any.
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
@@ -1288,7 +1302,7 @@ EXPORT_SYMBOL(drm_get_edid);
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
- * Return duplicate edid or NULL on allocation failure.
+ * Return: Pointer to duplicated EDID or NULL on allocation failure.
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
@@ -1411,7 +1425,8 @@ mode_is_rb(const struct drm_display_mode *mode)
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
- * Return a newly allocated copy of the mode, or NULL if not found.
+ *
+ * Return: A newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
@@ -1595,14 +1610,13 @@ bad_std_timing(u8 a, u8 b)
* @connector: connector of for the EDID block
* @edid: EDID block to scan
* @t: standard timing params
- * @revision: standard timing level
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
*/
static struct drm_display_mode *
drm_mode_std(struct drm_connector *connector, struct edid *edid,
- struct std_timing *t, int revision)
+ struct std_timing *t)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
@@ -1623,7 +1637,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
- if (revision < 3)
+ if (edid->revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
@@ -2140,7 +2154,7 @@ do_established_modes(struct detailed_timing *timing, void *c)
/**
* add_established_modes - get est. modes from EDID and add them
- * @connector: connector of for the EDID block
+ * @connector: connector to add mode(s) to
* @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
@@ -2191,8 +2205,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
struct drm_display_mode *newmode;
std = &data->data.timings[i];
- newmode = drm_mode_std(connector, edid, std,
- edid->revision);
+ newmode = drm_mode_std(connector, edid, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
closure->modes++;
@@ -2203,7 +2216,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
/**
* add_standard_modes - get std. modes from EDID and add them
- * @connector: connector of for the EDID block
+ * @connector: connector to add mode(s) to
* @edid: EDID block to scan
*
* Standard modes can be calculated using the appropriate standard (DMT,
@@ -2221,8 +2234,7 @@ add_standard_modes(struct drm_connector *connector, struct edid *edid)
struct drm_display_mode *newmode;
newmode = drm_mode_std(connector, edid,
- &edid->standard_timings[i],
- edid->revision);
+ &edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -2425,7 +2437,7 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
*
- * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* mode.
*/
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
@@ -2452,6 +2464,22 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
}
EXPORT_SYMBOL(drm_match_cea_mode);
+/**
+ * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
+ * the input VIC from the CEA mode list
+ * @video_code: ID given to each of the CEA modes
+ *
+ * Return: The picture aspect ratio of the CEA mode matching @video_code.
+ */
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+{
+	/*
+	 * VICs are 1-based, so index the CEA mode list at video_code - 1.
+	 */
+ return edid_cea_modes[video_code-1].picture_aspect_ratio;
+}
+EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
+
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
@@ -3023,11 +3051,9 @@ monitor_name(struct detailed_timing *t, void *data)
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
- * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
- * Some ELD fields are left to the graphics driver caller:
- * - Conn_Type
- * - HDCP
- * - Port_ID
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
+ * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
+ * fill in.
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
@@ -3111,9 +3137,10 @@ EXPORT_SYMBOL(drm_edid_to_eld);
* @sads: pointer that will be set to the extracted SADs
*
* Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
- * Note: returned pointer needs to be kfreed
*
- * Return number of found SADs or negative number on error.
+ * Note: The returned pointer needs to be freed using kfree().
+ *
+ * Return: The number of found SADs or negative number on error.
*/
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
{
@@ -3170,9 +3197,11 @@ EXPORT_SYMBOL(drm_edid_to_sad);
* @sadb: pointer to the speaker block
*
* Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
- * Note: returned pointer needs to be kfreed
*
- * Return number of found Speaker Allocation Blocks or negative number on error.
+ * Note: The returned pointer needs to be freed using kfree().
+ *
+ * Return: The number of found Speaker Allocation Blocks or negative number on
+ * error.
*/
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
{
@@ -3204,10 +3233,9 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
/* Speaker Allocation Data Block */
if (dbl == 3) {
- *sadb = kmalloc(dbl, GFP_KERNEL);
+ *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
if (!*sadb)
return -ENOMEM;
- memcpy(*sadb, &db[1], dbl);
count = dbl;
break;
}
@@ -3219,9 +3247,12 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
/**
- * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
+ * drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
+ *
+ * Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
+ * the sink doesn't support audio or video.
*/
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -3263,6 +3294,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ *
+ * Return: The connector associated with the first HDMI/DP sink that has ELD
+ * attached to it.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@@ -3270,6 +3304,8 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
return connector;
@@ -3279,11 +3315,12 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
EXPORT_SYMBOL(drm_select_eld);
/**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ *
+ * Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
@@ -3321,6 +3358,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
* audio format, assume at least 'basic audio' support, even if 'basic
* audio' is not defined in EDID.
*
+ * Return: True if the monitor supports audio, false otherwise.
*/
bool drm_detect_monitor_audio(struct edid *edid)
{
@@ -3364,6 +3402,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
* which quantization range (full or limited) is used.
+ *
+ * Return: True if the RGB quantization range is selectable, false otherwise.
*/
bool drm_rgb_quant_range_selectable(struct edid *edid)
{
@@ -3390,16 +3430,119 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
+ * drm_assign_hdmi_deep_color_info - detect whether monitor supports
+ * hdmi deep color modes and update drm_display_info if so.
+ *
+ * @edid: monitor EDID information
+ * @info: updated with the maximum supported deep color bpc and color format
+ *	if deep color is supported
+ * @connector: connector whose name is used in debug output
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ *
+ * Return: True if HDMI deep color is supported, false if not or unknown.
+ */
+static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
+ struct drm_display_info *info,
+ struct drm_connector *connector)
+{
+ u8 *edid_ext, *hdmi;
+ int i;
+ int start_offset, end_offset;
+ unsigned int dc_bpc = 0;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
+
+ /*
+ * Because HDMI identifier is in Vendor Specific Block,
+ * search it from all data blocks of CEA extension.
+ */
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
+ /* HDMI supports at least 8 bpc */
+ info->bpc = 8;
+
+ hdmi = &edid_ext[i];
+ if (cea_db_payload_len(hdmi) < 6)
+ return false;
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
+ dc_bpc = 10;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
+ DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
+ dc_bpc = 12;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
+ DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
+ dc_bpc = 16;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
+ DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
+ connector->name);
+ }
+
+ if (dc_bpc > 0) {
+ DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
+ connector->name, dc_bpc);
+ info->bpc = dc_bpc;
+
+ /*
+ * Deep color support mandates RGB444 support for all video
+ * modes and forbids YCRCB422 support for all video modes per
+ * HDMI 1.3 spec.
+ */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ /* YCRCB444 is optional according to spec. */
+ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
+ connector->name);
+ }
+
+ /*
+ * Spec says that if any deep color mode is supported at all,
+ * then deep color 36 bit must be supported.
+ */
+ if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
+ DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
+ connector->name);
+ }
+
+ return true;
+			} else {
+ DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
+ connector->name);
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
+ * @connector: connector whose EDID is used to build the display info
*
* Grab any available display info and stuff it into the drm_display_info
* structure that's part of the connector. Useful for tracking bpp and
* color spaces.
*/
static void drm_add_display_info(struct edid *edid,
- struct drm_display_info *info)
+ struct drm_display_info *info,
+ struct drm_connector *connector)
{
u8 *edid_ext;
@@ -3429,6 +3572,9 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
+ /* HDMI deep color modes supported? Assign to info, if so */
+ drm_assign_hdmi_deep_color_info(edid, info, connector);
+
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
@@ -3458,6 +3604,9 @@ static void drm_add_display_info(struct edid *edid,
break;
}
+ DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
@@ -3468,11 +3617,11 @@ static void drm_add_display_info(struct edid *edid,
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
- * @edid: edid data
+ * @edid: EDID data
*
* Add the specified modes to the connector's mode list.
*
- * Return number of modes added or 0 if we couldn't find any.
+ * Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -3484,7 +3633,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
}
if (!drm_edid_is_valid(edid)) {
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
+ connector->name);
return 0;
}
@@ -3516,11 +3665,14 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- drm_add_display_info(edid, &connector->display_info);
+ drm_add_display_info(edid, &connector->display_info, connector);
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
+ if (quirks & EDID_QUIRK_FORCE_12BPC)
+ connector->display_info.bpc = 12;
+
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
@@ -3534,7 +3686,7 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Add the specified modes to the connector's mode list. Only when the
* hdisplay/vdisplay is not beyond the given limit, it will be added.
*
- * Return number of modes added or 0 if we couldn't find any.
+ * Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
@@ -3573,13 +3725,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+/**
+ * drm_set_preferred_mode - Sets the preferred mode of a connector
+ * @connector: connector whose mode list should be processed
+ * @hpref: horizontal resolution of preferred mode
+ * @vpref: vertical resolution of preferred mode
+ *
+ * Marks a mode as preferred if it matches the resolution specified by @hpref
+ * and @vpref.
+ */
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
- if (mode->hdisplay == hpref &&
+ if (mode->hdisplay == hpref &&
mode->vdisplay == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@@ -3592,7 +3753,7 @@ EXPORT_SYMBOL(drm_set_preferred_mode);
* @frame: HDMI AVI infoframe
* @mode: DRM display mode
*
- * Returns 0 on success or a negative error code on failure.
+ * Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
@@ -3613,6 +3774,12 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->video_code = drm_match_cea_mode(mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+
+ /* Populate picture aspect ratio from CEA mode list */
+ if (frame->video_code > 0)
+ frame->picture_aspect = drm_get_cea_aspect_ratio(
+ frame->video_code);
+
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
@@ -3657,7 +3824,7 @@ s3d_structure_from_display_mode(const struct drm_display_mode *mode)
* 4k or stereoscopic 3D mode. So when giving any other mode as input this
* function will return -EINVAL, error that can be safely ignored.
*
- * Returns 0 on success or a negative error code on failure.
+ * Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
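
After drm_add_edid_modes() returns, the parsed (and quirk-adjusted) color capabilities sit in connector->display_info; a consumer might do, roughly:

    int num_modes = drm_add_edid_modes(connector, edid);

    /* 8 by default; 10/12/16 for a deep color sink, 12 if force-quirked */
    int bpc = connector->display_info.bpc;

    if (connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36)
    	DRM_DEBUG_KMS("sink handles 36-bit deep color\n");
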
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 1b4c7a5442c5..0a235fe61c9b 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,8 +31,9 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
-#define GENERIC_EDIDS 5
+#define GENERIC_EDIDS 6
static const char *generic_edid_name[GENERIC_EDIDS] = {
+ "edid/800x600.bin",
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
@@ -44,6 +45,24 @@ static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
+ 0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
+ 0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
+ 0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
@@ -242,7 +261,7 @@ out:
int drm_load_edid_firmware(struct drm_connector *connector)
{
- const char *connector_name = drm_get_connector_name(connector);
+ const char *connector_name = connector->name;
char *edidname = edid_firmware, *last, *colon;
int ret;
struct edid *edid;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 61b5a47ad239..f27c883be391 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -429,13 +429,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma) {
- struct drm_device *dev = fbdev_cma->fb_helper.dev;
-
- drm_modeset_lock_all(dev);
- drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
- drm_modeset_unlock_all(dev);
- }
+ if (fbdev_cma)
+ drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 04d3fd3658f3..d5d8cea1a679 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -45,13 +45,13 @@ static LIST_HEAD(kernel_fb_helper_list);
* DOC: fbdev helpers
*
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
- * mode setting driver. They can be used mostly independantely from the crtc
+ * mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
* Initialization is done as a three-step process with drm_fb_helper_init(),
* drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
- * Drivers with fancier requirements than the default beheviour can override the
+ * Drivers with fancier requirements than the default behaviour can override the
* second step with their own code. Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
@@ -59,7 +59,7 @@ static LIST_HEAD(kernel_fb_helper_list);
* should also notify the fb helper code from updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
- * code proves a ->output_poll_changed callback.
+ * code provides a ->output_poll_changed callback.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
@@ -120,7 +120,7 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
mode = &fb_helper_conn->cmdline_mode;
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(connector->name, &option))
continue;
if (drm_mode_parse_command_line_for_connector(option,
@@ -142,12 +142,12 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
}
DRM_INFO("forcing %s connector %s\n",
- drm_get_connector_name(connector), s);
+ connector->name, s);
connector->force = mode->force;
}
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
- drm_get_connector_name(connector),
+ connector->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
@@ -273,15 +273,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-/**
- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
- * @fb_helper: fbcon to restore
- *
- * This should be called from driver's drm ->lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- */
-bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
@@ -311,7 +303,40 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
return error;
}
-EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from driver's drm ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * Use this variant if you need to bypass locking (panic), or already
+ * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked().
+ */
+static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ return restore_fbdev_mode(fb_helper);
+}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from driver's drm ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
+bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ bool ret;
+ drm_modeset_lock_all(dev);
+ ret = restore_fbdev_mode(fb_helper);
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
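
With the unlocked variant exported, a driver's ->lastclose collapses to something like this (private-struct names are invented for illustration):

    static void example_lastclose(struct drm_device *dev)
    {
    	struct example_private *priv = dev->dev_private;	/* hypothetical */

    	if (priv->fbdev)
    		drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev->helper);
    }
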
/*
* restore fbcon display for all kms driver's using this helper, used for sysrq
@@ -326,12 +351,25 @@ static bool drm_fb_helper_force_kernel_mode(void)
return false;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ struct drm_device *dev = helper->dev;
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ /* NOTE: we use lockless flag below to avoid grabbing other
+ * modeset locks. So just trylock the underlying mutex
+ * directly:
+ */
+ if (!mutex_trylock(&dev->mode_config.mutex)) {
+ error = true;
continue;
+ }
ret = drm_fb_helper_restore_fbdev_mode(helper);
if (ret)
error = true;
+
+ mutex_unlock(&dev->mode_config.mutex);
}
return error;
}
@@ -811,7 +849,6 @@ EXPORT_SYMBOL(drm_fb_helper_check_var);
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
if (var->pixclock != 0) {
@@ -819,9 +856,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- drm_modeset_lock_all(dev);
- drm_fb_helper_restore_fbdev_mode(fb_helper);
- drm_modeset_unlock_all(dev);
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e1eba0b7cd45..021fe5d11df5 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -43,8 +43,7 @@
DEFINE_MUTEX(drm_global_mutex);
EXPORT_SYMBOL(drm_global_mutex);
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_minor *minor);
+static int drm_open_helper(struct file *filp, struct drm_minor *minor);
static int drm_setup(struct drm_device * dev)
{
@@ -95,7 +94,7 @@ int drm_open(struct inode *inode, struct file *filp)
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
- retcode = drm_open_helper(inode, filp, minor);
+ retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
if (need_setup) {
@@ -171,7 +170,6 @@ static int drm_cpu_valid(void)
/**
* Called whenever a process opens /dev/drm.
*
- * \param inode device inode.
* \param filp file pointer.
* \param minor acquired minor-object.
* \return zero on success or a negative number on failure.
@@ -179,8 +177,7 @@ static int drm_cpu_valid(void)
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
-static int drm_open_helper(struct inode *inode, struct file *filp,
- struct drm_minor *minor)
+static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_file *priv;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9909bef59800..f7d71190aad5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -474,21 +474,10 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
goto fail;
pages[i] = p;
- /* There is a hypothetical issue w/ drivers that require
- * buffer memory in the low 4GB.. if the pages are un-
- * pinned, and swapped out, they can end up swapped back
- * in above 4GB. If pages are already in memory, then
- * shmem_read_mapping_page_gfp will ignore the gfpmask,
- * even if the already in-memory page disobeys the mask.
- *
- * It is only a theoretical issue today, because none of
- * the devices with this limitation can be populated with
- * enough memory to trigger the issue. But this BUG_ON()
- * is here as a reminder in case the problem with
- * shmem_read_mapping_page_gfp() isn't solved by the time
- * it does become a real issue.
- *
- * See this thread: http://lkml.org/lkml/2011/7/11/238
+ /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
+ * correct region during swapin. Note that this requires
+ * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
+ * so shmem can relocate pages during swapin if required.
*/
BUG_ON((gfpmask & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
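
The rewritten comment states the actual contract: a driver whose device needs pages below 4 GiB must put __GFP_DMA32 into the mapping's gfp mask when the object is created, so shmem honors it on swapin as well. Roughly:

    struct address_space *mapping;

    /* once, right after the GEM object's shmem backing file is set up */
    mapping = file_inode(obj->filp)->i_mapping;
    mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
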
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 7473035dd28b..86feedd5e6f6 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,18 +47,16 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
- const char *bus_name;
if (!master)
return 0;
- bus_name = dev->driver->bus->get_name(dev);
if (master->unique) {
seq_printf(m, "%s %s %s\n",
- bus_name,
+ dev->driver->name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s %s\n",
- bus_name, dev_name(dev->dev));
+ dev->driver->name, dev_name(dev->dev));
}
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 93a42040bedb..69c61f392e66 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -72,9 +72,6 @@ static void
drm_unset_busid(struct drm_device *dev,
struct drm_master *master)
{
- kfree(dev->devname);
- dev->devname = NULL;
-
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
@@ -93,7 +90,8 @@ drm_unset_busid(struct drm_device *dev,
* Copies the bus id from userspace into drm_device::unique, and verifies that
* it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
* in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater.
+ * version 1.1 or greater. Also note that KMS is always interface version 1.1
+ * or later, and UMS was only ever supported on PCI devices.
*/
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -108,10 +106,13 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- if (!dev->driver->bus->set_unique)
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ if (WARN_ON(!dev->pdev))
return -EINVAL;
- ret = dev->driver->bus->set_unique(dev, master, u);
+ ret = drm_pci_set_unique(dev, master, u);
if (ret)
goto err;
@@ -130,13 +131,25 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
if (master->unique != NULL)
drm_unset_busid(dev, master);
- ret = dev->driver->bus->set_busid(dev, master);
- if (ret)
- goto err;
+ if (dev->driver->bus && dev->driver->bus->set_busid) {
+ ret = dev->driver->bus->set_busid(dev, master);
+ if (ret) {
+ drm_unset_busid(dev, master);
+ return ret;
+ }
+ } else {
+ if (WARN(dev->unique == NULL,
+ "No drm_bus.set_busid() implementation provided by "
+ "%ps. Use drm_dev_set_unique() to set the unique "
+ "name explicitly.", dev->driver))
+ return -EINVAL;
+
+ master->unique = kstrdup(dev->unique, GFP_KERNEL);
+ if (master->unique)
+ master->unique_len = strlen(dev->unique);
+ }
+
return 0;
-err:
- drm_unset_busid(dev, master);
- return ret;
}
/**
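As a rough illustration of the fallback introduced above (not part of the patch): a driver without a drm_bus.set_busid() hook can provide the unique name itself during probe. The ddev/pdev variables and error label are hypothetical; drm_dev_set_unique() takes a printf-style format string in this release:

        /* in the driver's probe path, before registering the device */
        ret = drm_dev_set_unique(ddev, "%s", dev_name(&pdev->dev));
        if (ret)
                goto err_free;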
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index ec5c3f4cdd01..0de123afdb34 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1,6 +1,5 @@
-/**
- * \file drm_irq.c
- * IRQ support
+/*
+ * drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
@@ -56,33 +55,6 @@
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
-/**
- * Get interrupt from bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_irq_busid structure.
- * \return zero on success or a negative number on failure.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
- */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (!dev->driver->bus->irq_by_busid)
- return -EINVAL;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
- return dev->driver->bus->irq_by_busid(dev, p);
-}
-
/*
* Clear vblank timestamp buffer for a crtc.
*/
@@ -167,33 +139,40 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
static void vblank_disable_fn(unsigned long arg)
{
- struct drm_device *dev = (struct drm_device *)arg;
+ struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_device *dev = vblank->dev;
unsigned long irqflags;
- int i;
+ int crtc = vblank->crtc;
if (!dev->vblank_disable_allowed)
return;
- for (i = 0; i < dev->num_crtcs; i++) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&dev->vblank[i].refcount) == 0 &&
- dev->vblank[i].enabled) {
- DRM_DEBUG("disabling vblank on crtc %d\n", i);
- vblank_disable_and_save(dev, i);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
+ DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
+ vblank_disable_and_save(dev, crtc);
}
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
+/**
+ * drm_vblank_cleanup - cleanup vblank support
+ * @dev: DRM device
+ *
+ * This function cleans up any resources allocated in drm_vblank_init.
+ */
void drm_vblank_cleanup(struct drm_device *dev)
{
+ int crtc;
+
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
- del_timer_sync(&dev->vblank_disable_timer);
-
- vblank_disable_fn((unsigned long)dev);
+ for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+ del_timer_sync(&dev->vblank[crtc].disable_timer);
+ vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
+ }
kfree(dev->vblank);
@@ -201,12 +180,20 @@ void drm_vblank_cleanup(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_vblank_cleanup);
+/**
+ * drm_vblank_init - initialize vblank support
+ * @dev: drm_device
+ * @num_crtcs: number of crtcs supported by @dev
+ *
+ * This function initializes vblank support for @num_crtcs display pipelines.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
- (unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
@@ -216,8 +203,13 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->vblank)
goto err;
- for (i = 0; i < num_crtcs; i++)
+ for (i = 0; i < num_crtcs; i++) {
+ dev->vblank[i].dev = dev;
+ dev->vblank[i].crtc = i;
init_waitqueue_head(&dev->vblank[i].queue);
+ setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
+ (unsigned long)&dev->vblank[i]);
+ }
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -261,42 +253,42 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
}
/**
- * Install IRQ handler.
- *
- * \param dev DRM device.
+ * drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
*
* Initializes the IRQ related data. Installs the handler, calling the driver
- * \c irq_preinstall() and \c irq_postinstall() functions
- * before and after the installation.
+ * irq_preinstall() and irq_postinstall() functions before and after the
+ * installation.
+ *
+ * This is the simplified helper interface provided for drivers with no special
+ * needs. Drivers which need to install interrupt handlers for multiple
+ * interrupts must instead set drm_device->irq_enabled to signal the DRM core
+ * that vblank interrupts are available.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
-int drm_irq_install(struct drm_device *dev)
+int drm_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
- char *irqname;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (drm_dev_to_irq(dev) == 0)
+ if (irq == 0)
return -EINVAL;
- mutex_lock(&dev->struct_mutex);
-
/* Driver must have been initialized */
- if (!dev->dev_private) {
- mutex_unlock(&dev->struct_mutex);
+ if (!dev->dev_private)
return -EINVAL;
- }
- if (dev->irq_enabled) {
- mutex_unlock(&dev->struct_mutex);
+ if (dev->irq_enabled)
return -EBUSY;
- }
dev->irq_enabled = true;
- mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+ DRM_DEBUG("irq=%d\n", irq);
/* Before installing handler */
if (dev->driver->irq_preinstall)
@@ -306,18 +298,11 @@ int drm_irq_install(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
- if (dev->devname)
- irqname = dev->devname;
- else
- irqname = dev->driver->name;
-
- ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, irqname, dev);
+ ret = request_irq(irq, dev->driver->irq_handler,
+ sh_flags, dev->driver->name, dev);
if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
dev->irq_enabled = false;
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -329,12 +314,12 @@ int drm_irq_install(struct drm_device *dev)
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
dev->irq_enabled = false;
- mutex_unlock(&dev->struct_mutex);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
- free_irq(drm_dev_to_irq(dev), dev);
+ free_irq(irq, dev);
+ } else {
+ dev->irq = irq;
}
return ret;
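Under the new signature a driver passes the interrupt number explicitly instead of going through the removed drm_dev_to_irq() helper. A sketch for a PCI-based KMS driver, with error handling trimmed:

        /* in the driver's ->load() hook */
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                return ret;

        /* and symmetrically in ->unload() */
        drm_irq_uninstall(dev);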
@@ -342,11 +327,20 @@ int drm_irq_install(struct drm_device *dev)
EXPORT_SYMBOL(drm_irq_install);
/**
- * Uninstall the IRQ handler.
+ * drm_irq_uninstall - uninstall the IRQ handler
+ * @dev: DRM device
+ *
+ * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
+ * This should only be called by drivers which used drm_irq_install() to set up
+ * their interrupt handler. Other drivers must only reset
+ * drm_device->irq_enabled to false.
*
- * \param dev DRM device.
+ * Note that for kernel modesetting drivers it is a bug if this function fails.
+ * The sanity checks are only to catch buggy user modesetting drivers which call
+ * the same function through an ioctl.
*
- * Calls the driver's \c irq_uninstall() function, and stops the irq.
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
int drm_irq_uninstall(struct drm_device *dev)
{
@@ -357,10 +351,8 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
dev->irq_enabled = false;
- mutex_unlock(&dev->struct_mutex);
/*
* Wake up any waiters so they don't hang.
@@ -379,7 +371,7 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+ DRM_DEBUG("irq=%d\n", dev->irq);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -387,13 +379,13 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
- free_irq(drm_dev_to_irq(dev), dev);
+ free_irq(dev->irq, dev);
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
-/**
+/*
* IRQ control ioctl.
*
* \param inode device inode.
@@ -408,43 +400,52 @@ int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
+ int ret = 0, irq;
/* if we don't have an IRQ we fall back for compatibility reasons -
* this used to be a separate function in drm_dma.h
*/
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ /* UMS was only ever supported on PCI devices. */
+ if (WARN_ON(!dev->pdev))
+ return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
+ irq = dev->pdev->irq;
+
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != drm_dev_to_irq(dev))
+ ctl->irq != irq)
return -EINVAL;
- return drm_irq_install(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_irq_install(dev, irq);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
case DRM_UNINST_HANDLER:
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
- return drm_irq_uninstall(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_irq_uninstall(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
default:
return -EINVAL;
}
}
/**
- * drm_calc_timestamping_constants - Calculate vblank timestamp constants
- *
- * @crtc drm_crtc whose timestamp constants should be updated.
- * @mode display mode containing the scanout timings
+ * drm_calc_timestamping_constants - calculate vblank timestamp constants
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ * @mode: display mode containing the scanout timings
*
* Calculate and store various constants which are later
* needed by vblank and swap-completion timestamping, e.g,
* by drm_calc_vbltimestamp_from_scanoutpos(). They are
- * derived from crtc's true scanout timing, so they take
+ * derived from the CRTC's true scanout timing, so they take
* things like panel scaling or other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
@@ -489,11 +490,22 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
- * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
- * drivers. Implements calculation of exact vblank timestamps from
- * given drm_display_mode timings and current video scanout position
- * of a crtc. This can be called from within get_vblank_timestamp()
- * implementation of a kms driver to implement the actual timestamping.
+ * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
+ * @dev: DRM device
+ * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default,
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
+ * @refcrtc: CRTC which defines scanout timing
+ * @mode: mode which defines the scanout timings
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be called from
+ * within get_vblank_timestamp() implementation of a kms driver to implement the
+ * actual timestamping.
*
* Should return timestamps conforming to the OML_sync_control OpenML
* extension specification. The timestamp corresponds to the end of
@@ -508,21 +520,11 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* returns as no operation if a doublescan or interlaced video mode is
* active. Higher level code is expected to handle this.
*
- * @dev: DRM device.
- * @crtc: Which crtc's vblank timestamp to retrieve.
- * @max_error: Desired maximum allowable error in timestamps (nanosecs).
- * On return contains true maximum error of timestamp.
- * @vblank_time: Pointer to struct timeval which should receive the timestamp.
- * @flags: Flags to pass to driver:
- * 0 = Default.
- * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
- * @refcrtc: drm_crtc* of crtc which defines scanout timing.
- * @mode: mode which defines the scanout timings
- *
- * Returns negative value on error, failure or if not supported in current
+ * Returns:
+ * Negative value on error, failure or if not supported in current
* video mode:
*
- * -EINVAL - Invalid crtc.
+ * -EINVAL - Invalid CRTC.
* -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
* -ENOTSUPP - Function not supported in current display mode.
* -EIO - Failed, e.g., due to failed scanout position query.
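For reference, a sketch of a kms driver's get_vblank_timestamp() hook built on this helper; my_crtc_for_pipe() is a hypothetical pipe-to-CRTC lookup, and hwmode holds the timings programmed by the last modeset:

static int my_get_vblank_timestamp(struct drm_device *dev, int crtc,
                                   int *max_error,
                                   struct timeval *vblank_time,
                                   unsigned flags)
{
        struct drm_crtc *refcrtc = my_crtc_for_pipe(dev, crtc);

        if (!refcrtc)
                return -EINVAL;

        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
                                                     vblank_time, flags,
                                                     refcrtc,
                                                     &refcrtc->hwmode);
}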
@@ -671,23 +673,23 @@ static struct timeval get_drm_timestamp(void)
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval.
- *
+ * vblank interval
* @dev: DRM device
- * @crtc: which crtc's vblank timestamp to retrieve
+ * @crtc: which CRTC's vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
- * 0 = Default.
- * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * 0 = Default,
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
*
* Fetches the system timestamp corresponding to the time of the most recent
- * vblank interval on specified crtc. May call into kms-driver to
+ * vblank interval on specified CRTC. May call into kms-driver to
* compute the timestamp with a high-precision GPU specific method.
*
* Returns zero if timestamp originates from uncorrected do_gettimeofday()
* call, i.e., it isn't very precisely locked to the true vblank.
*
- * Returns non-zero if timestamp is considered to be very precise.
+ * Returns:
+ * Non-zero if timestamp is considered to be very precise, zero otherwise.
*/
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
@@ -722,6 +724,9 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
+ *
+ * Returns:
+ * The software vblank counter.
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
@@ -740,8 +745,7 @@ EXPORT_SYMBOL(drm_vblank_count);
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
- * of the vblank interval that corresponds to the current value vblank counter
- * value.
+ * of the vblank interval that corresponds to the current vblank counter value.
*/
u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime)
@@ -870,6 +874,42 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
}
/**
+ * drm_vblank_enable - enable the vblank interrupt on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ */
+static int drm_vblank_enable(struct drm_device *dev, int crtc)
+{
+ int ret = 0;
+
+ assert_spin_locked(&dev->vbl_lock);
+
+ spin_lock(&dev->vblank_time_lock);
+
+ if (!dev->vblank[crtc].enabled) {
+ /*
+ * Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+ * timestamps. Filtercode in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank[crtc].refcount);
+ else {
+ dev->vblank[crtc].enabled = true;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+
+ spin_unlock(&dev->vblank_time_lock);
+
+ return ret;
+}
+
+/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
* @crtc: which CRTC to own
@@ -877,36 +917,20 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * RETURNS
+ * This is the legacy version of drm_crtc_vblank_get().
+ *
+ * Returns:
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
- unsigned long irqflags, irqflags2;
+ unsigned long irqflags;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
- if (!dev->vblank[crtc].enabled) {
- /* Enable vblank irqs under vblank_time_lock protection.
- * All vblank count & timestamp updates are held off
- * until we are done reinitializing master counter and
- * timestamps. Filtercode in drm_handle_vblank() will
- * prevent double-accounting of same vblank interval.
- */
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
- crtc, ret);
- if (ret)
- atomic_dec(&dev->vblank[crtc].refcount);
- else {
- dev->vblank[crtc].enabled = true;
- drm_update_vblank_count(dev, crtc);
- }
- }
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ ret = drm_vblank_enable(dev, crtc);
} else {
if (!dev->vblank[crtc].enabled) {
atomic_dec(&dev->vblank[crtc].refcount);
@@ -920,12 +944,32 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_get);
/**
+ * drm_crtc_vblank_get - get a reference count on vblank events
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * This is the native kms version of drm_vblank_get().
+ *
+ * Returns:
+ * Zero on success, nonzero on failure.
+ */
+int drm_crtc_vblank_get(struct drm_crtc *crtc)
+{
+ return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get);
+
+/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the legacy version of drm_crtc_vblank_put().
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
@@ -934,17 +978,39 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
- mod_timer(&dev->vblank_disable_timer,
+ mod_timer(&dev->vblank[crtc].disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
/**
+ * drm_crtc_vblank_put - give up ownership of vblank events
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the native kms version of drm_vblank_put().
+ */
+void drm_crtc_vblank_put(struct drm_crtc *crtc)
+{
+ drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_put);
+
+/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*
- * Caller must hold event lock.
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_vblank_on() can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ *
+ * This is the legacy version of drm_crtc_vblank_off().
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
@@ -978,12 +1044,87 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_off);
/**
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_vblank_on() can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ *
+ * This is the native kms version of drm_vblank_off().
+ */
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
+{
+ drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_off);
+
+/**
+ * drm_vblank_on - enable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * in driver load code to reflect the current hardware state of the crtc.
+ *
+ * This is the legacy version of drm_crtc_vblank_on().
+ */
+void drm_vblank_on(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* re-enable interrupts if there are users left */
+ if (atomic_read(&dev->vblank[crtc].refcount) != 0)
+ WARN_ON(drm_vblank_enable(dev, crtc));
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_on);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * in driver load code to reflect the current hardware state of the crtc.
+ *
+ * This is the native kms version of drm_vblank_on().
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_on);
+
+/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
+ *
+ * This is done by grabbing a temporary vblank reference to ensure that the
+ * vblank interrupt keeps running across the modeset sequence. With this the
+ * software-side vblank frame counting will ensure that there are no jumps or
+ * discontinuities.
+ *
+ * Unfortunately this approach is racy and also doesn't work when the vblank
+ * interrupt stops running, e.g. across system suspend resume. It is therefore
+ * highly recommended that drivers use the newer drm_vblank_off() and
+ * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
+ * using "cooked" software vblank frame counters and not relying on any hardware
+ * counters.
+ *
+ * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
+ * again.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
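A sketch of how a driver whose hardware frame counter resets across power transitions would bracket its CRTC hooks with these helpers; the hook names and programming steps are hypothetical:

static void my_crtc_disable(struct drm_crtc *crtc)
{
        drm_crtc_vblank_off(crtc);
        /* ... stop scanout, gate clocks, power down the pipe ... */
}

static void my_crtc_enable(struct drm_crtc *crtc)
{
        /* ... program timings and start scanout ... */
        drm_crtc_vblank_on(crtc);
}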
@@ -1005,6 +1146,14 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function again drops the temporary vblank reference acquired in
+ * drm_vblank_pre_modeset.
+ */
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
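The legacy pattern being documented, as a driver relying on it would write it (sketch; pipe is the CRTC index):

        drm_vblank_pre_modeset(dev, pipe);
        /* ... reprogram the CRTC; the hardware frame counter may reset ... */
        drm_vblank_post_modeset(dev, pipe);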
@@ -1026,7 +1175,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
-/**
+/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@@ -1139,7 +1288,7 @@ err_put:
return ret;
}
-/**
+/*
* Wait for VBLANK.
*
* \param inode device inode.
@@ -1150,7 +1299,7 @@ err_put:
*
* This function enables the vblank interrupt on the pipe requested, then
* sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
* after a timeout with no further vblank waits scheduled).
*/
int drm_wait_vblank(struct drm_device *dev, void *data,
@@ -1160,9 +1309,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret;
unsigned int flags, seq, crtc, high_crtc;
- if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
- return -EINVAL;
+ if (!dev->irq_enabled)
+ return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@@ -1222,6 +1370,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
+ !dev->vblank[crtc].enabled ||
!dev->irq_enabled));
if (ret != -EINTR) {
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 09821f46d768..e633df2f68d8 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -282,6 +282,14 @@ static int mipi_dsi_drv_remove(struct device *dev)
return drv->remove(dsi);
}
+static void mipi_dsi_drv_shutdown(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ drv->shutdown(dsi);
+}
+
/**
* mipi_dsi_driver_register - register a driver for DSI devices
* @drv: DSI driver structure
@@ -293,6 +301,8 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
drv->driver.probe = mipi_dsi_drv_probe;
if (drv->remove)
drv->driver.remove = mipi_dsi_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = mipi_dsi_drv_shutdown;
return driver_register(&drv->driver);
}
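A panel driver opts into the new hook simply by filling in .shutdown; everything named my_panel_* below is hypothetical, with the probe and remove callbacks assumed to exist elsewhere in the driver:

static void my_panel_shutdown(struct mipi_dsi_device *dsi)
{
        /* cut power to the panel cleanly on system shutdown */
}

static struct mipi_dsi_driver my_panel_driver = {
        .driver.name = "my-panel",
        .probe = my_panel_probe,
        .remove = my_panel_remove,
        .shutdown = my_panel_shutdown,
};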
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 8b410576fce4..bedf1894e17e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1013,6 +1013,7 @@ EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
+ * @merge_type_bits: whether to merge or overwrite type bits.
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
@@ -1021,7 +1022,8 @@ EXPORT_SYMBOL(drm_mode_sort);
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
-void drm_mode_connector_list_update(struct drm_connector *connector)
+void drm_mode_connector_list_update(struct drm_connector *connector,
+ bool merge_type_bits)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
@@ -1039,7 +1041,10 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
- mode->type |= pmode->type;
+ if (merge_type_bits)
+ mode->type |= pmode->type;
+ else
+ mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
new file mode 100644
index 000000000000..0dc57d5ecd10
--- /dev/null
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * DOC: kms locking
+ *
+ * As KMS moves toward more fine grained locking, and atomic ioctl where
+ * userspace can indirectly control locking order, it becomes necessary
+ * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
+ * the locking is more distributed around the driver code, we want a bit
+ * of extra utility/tracking out of our acquire-ctx. This is provided
+ * by drm_modeset_lock / drm_modeset_acquire_ctx.
+ *
+ * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
+ *
+ * The basic usage pattern is to:
+ *
+ * drm_modeset_acquire_init(&ctx)
+ * retry:
+ * foreach (lock in random_ordered_set_of_locks) {
+ * ret = drm_modeset_lock(lock, &ctx)
+ * if (ret == -EDEADLK) {
+ * drm_modeset_backoff(&ctx);
+ * goto retry;
+ * }
+ * }
+ *
+ * ... do stuff ...
+ *
+ * drm_modeset_drop_locks(&ctx);
+ * drm_modeset_acquire_fini(&ctx);
+ */
+
+
+/**
+ * drm_modeset_acquire_init - initialize acquire context
+ * @ctx: the acquire context
+ * @flags: for future
+ */
+void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
+ uint32_t flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
+ INIT_LIST_HEAD(&ctx->locked);
+}
+EXPORT_SYMBOL(drm_modeset_acquire_init);
+
+/**
+ * drm_modeset_acquire_fini - cleanup acquire context
+ * @ctx: the acquire context
+ */
+void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
+{
+ ww_acquire_fini(&ctx->ww_ctx);
+}
+EXPORT_SYMBOL(drm_modeset_acquire_fini);
+
+/**
+ * drm_modeset_drop_locks - drop all locks
+ * @ctx: the acquire context
+ *
+ * Drop all locks currently held against this acquire context.
+ */
+void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
+{
+ WARN_ON(ctx->contended);
+ while (!list_empty(&ctx->locked)) {
+ struct drm_modeset_lock *lock;
+
+ lock = list_first_entry(&ctx->locked,
+ struct drm_modeset_lock, head);
+
+ drm_modeset_unlock(lock);
+ }
+}
+EXPORT_SYMBOL(drm_modeset_drop_locks);
+
+static inline int modeset_lock(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool interruptible, bool slow)
+{
+ int ret;
+
+ WARN_ON(ctx->contended);
+
+ if (interruptible && slow) {
+ ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
+ } else if (interruptible) {
+ ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
+ } else if (slow) {
+ ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
+ ret = 0;
+ } else {
+ ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
+ }
+ if (!ret) {
+ WARN_ON(!list_empty(&lock->head));
+ list_add(&lock->head, &ctx->locked);
+ } else if (ret == -EALREADY) {
+ /* we already hold the lock.. this is fine. For atomic
+ * we will need to be able to drm_modeset_lock() things
+ * without having to keep track of what is already locked
+ * or not.
+ */
+ ret = 0;
+ } else if (ret == -EDEADLK) {
+ ctx->contended = lock;
+ }
+
+ return ret;
+}
+
+static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
+ bool interruptible)
+{
+ struct drm_modeset_lock *contended = ctx->contended;
+
+ ctx->contended = NULL;
+
+ if (WARN_ON(!contended))
+ return 0;
+
+ drm_modeset_drop_locks(ctx);
+
+ return modeset_lock(contended, ctx, interruptible, true);
+}
+
+/**
+ * drm_modeset_backoff - deadlock avoidance backoff
+ * @ctx: the acquire context
+ *
+ * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
+ * you must call this function to drop all currently held locks and
+ * block until the contended lock becomes available.
+ */
+void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
+{
+ modeset_backoff(ctx, false);
+}
+EXPORT_SYMBOL(drm_modeset_backoff);
+
+/**
+ * drm_modeset_backoff_interruptible - deadlock avoidance backoff
+ * @ctx: the acquire context
+ *
+ * Interruptible version of drm_modeset_backoff()
+ */
+int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
+{
+ return modeset_backoff(ctx, true);
+}
+EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
+
+/**
+ * drm_modeset_lock - take modeset lock
+ * @lock: lock to take
+ * @ctx: acquire ctx
+ *
+ * If ctx is not NULL, then its ww acquire context is used and the
+ * lock will be tracked by the context and can be released by calling
+ * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
+ * deadlock scenario has been detected and it is an error to attempt
+ * to take any more locks without first calling drm_modeset_backoff().
+ */
+int drm_modeset_lock(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ if (ctx)
+ return modeset_lock(lock, ctx, false, false);
+
+ ww_mutex_lock(&lock->mutex, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(drm_modeset_lock);
+
+/**
+ * drm_modeset_lock_interruptible - take modeset lock
+ * @lock: lock to take
+ * @ctx: acquire ctx
+ *
+ * Interruptible version of drm_modeset_lock()
+ */
+int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ if (ctx)
+ return modeset_lock(lock, ctx, true, false);
+
+ return ww_mutex_lock_interruptible(&lock->mutex, NULL);
+}
+EXPORT_SYMBOL(drm_modeset_lock_interruptible);
+
+/**
+ * drm_modeset_unlock - drop modeset lock
+ * @lock: lock to release
+ */
+void drm_modeset_unlock(struct drm_modeset_lock *lock)
+{
+ list_del_init(&lock->head);
+ ww_mutex_unlock(&lock->mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock);
+
+/* Temporary.. until we have sufficiently fine grained locking, there
+ * are a couple scenarios where it is convenient to grab all crtc locks.
+ * It is planned to remove this:
+ */
+int drm_modeset_lock_all_crtcs(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
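Spelling out the DOC comment's pattern as compilable code, a sketch of a driver function that takes every CRTC lock with deadlock backoff:

static void my_touch_all_crtcs(struct drm_device *dev)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock_all_crtcs(dev, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        /* ... inspect or update CRTC state under the locks ... */

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}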
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 9c696a5ad74d..020cfd934854 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -1,17 +1,3 @@
-/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
-/**
- * \file drm_pci.c
- * \brief Functions and ioctls to manage PCI memory
- *
- * \warning These interfaces aren't stable yet.
- *
- * \todo Implement the remaining ioctl's for the PCI pools.
- * \todo The wrappers here are so thin that they would be better off inlined..
- *
- * \author José Fonseca <jrfonseca@tungstengraphics.com>
- * \author Leif Delgass <ldelgass@retinalburn.net>
- */
-
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
@@ -42,12 +28,14 @@
#include <linux/export.h>
#include <drm/drmP.h>
-/**********************************************************************/
-/** \name PCI memory */
-/*@{*/
-
/**
- * \brief Allocate a PCI consistent memory block, for DMA.
+ * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
+ * @dev: DRM device
+ * @size: size of block to allocate
+ * @align: alignment of block
+ *
+ * Return: A handle to the allocated memory block on success or NULL on
+ * failure.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
@@ -88,8 +76,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
EXPORT_SYMBOL(drm_pci_alloc);
-/**
- * \brief Free a PCI consistent memory block without freeing its descriptor.
+/*
+ * Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
@@ -111,7 +99,9 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
}
/**
- * \brief Free a PCI consistent memory block
+ * drm_pci_free - Free a PCI consistent memory block
+ * @dev: DRM device
+ * @dmah: handle to memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
@@ -137,21 +127,9 @@ static int drm_get_pci_domain(struct drm_device *dev)
return pci_domain_nr(dev->pdev->bus);
}
-static int drm_pci_get_irq(struct drm_device *dev)
-{
- return dev->pdev->irq;
-}
-
-static const char *drm_pci_get_name(struct drm_device *dev)
-{
- struct pci_driver *pdriver = dev->driver->kdriver.pci;
- return pdriver->name;
-}
-
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
- struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
@@ -173,29 +151,16 @@ static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
} else
master->unique_len = len;
- dev->devname =
- kmalloc(strlen(pdriver->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", pdriver->name,
- master->unique);
-
return 0;
err:
return ret;
}
-static int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u)
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
{
int domain, bus, slot, func, ret;
- const char *bus_name;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
@@ -212,17 +177,6 @@ static int drm_pci_set_unique(struct drm_device *dev,
master->unique[master->unique_len] = '\0';
- bus_name = dev->driver->bus->get_name(dev);
- dev->devname = kmalloc(strlen(bus_name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", bus_name,
- master->unique);
-
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
@@ -247,7 +201,6 @@ err:
return ret;
}
-
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -262,6 +215,36 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
return 0;
}
+/**
+ * drm_irq_by_busid - Get interrupt from bus ID
+ * @dev: DRM device
+ * @data: IOCTL parameter pointing to a drm_irq_busid structure
+ * @file_priv: DRM file private.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance is attached to.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_irq_busid *p = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* UMS was only ever support on PCI devices. */
+ if (WARN_ON(!dev->pdev))
+ return -EINVAL;
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+ return drm_pci_irq_by_busid(dev, p);
+}
+
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -287,24 +270,20 @@ void drm_pci_agp_destroy(struct drm_device *dev)
}
static struct drm_bus drm_pci_bus = {
- .bus_type = DRIVER_BUS_PCI,
- .get_irq = drm_pci_get_irq,
- .get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
- .set_unique = drm_pci_set_unique,
- .irq_by_busid = drm_pci_irq_by_busid,
};
/**
- * Register.
- *
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
+ * drm_get_pci_dev - Register a PCI device with the DRM subsystem
+ * @pdev: PCI device
+ * @ent: entry from the PCI ID table that matches @pdev
+ * @driver: DRM device driver
*
* Attempts to get inter module "drm" information. If we are first
* then register the character device and inter module information.
* Try and register, if we fail to register, back out previous work.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
@@ -357,15 +336,14 @@ err_free:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * PCI device initialization. Called direct from modules at load time.
- *
- * \return zero on success or a negative number on failure.
+ * drm_pci_init - Register matching PCI devices with the DRM subsystem
+ * @driver: DRM device driver
+ * @pdriver: PCI device driver
*
- * Initializes a drm_device structures,registering the
- * stubs and initializing the AGP device.
+ * Initializes a drm_device structure, registering the stubs and initializing
+ * the AGP device.
*
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
+ * Return: 0 on success or a negative error code on failure.
*/
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
@@ -375,7 +353,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
if (driver->driver_features & DRIVER_MODESET)
@@ -453,11 +430,31 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
void drm_pci_agp_destroy(struct drm_device *dev) {}
+
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -EINVAL;
+}
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ return -EINVAL;
+}
#endif
EXPORT_SYMBOL(drm_pci_init);
-/*@}*/
+/**
+ * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
+ * @driver: DRM device driver
+ * @pdriver: PCI device driver
+ *
+ * Unregisters one or more devices matched by a PCI driver from the DRM
+ * subsystem.
+ */
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index d2b1c03b3d71..6d133149cc74 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -25,7 +25,9 @@
#include <linux/list.h>
#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#define SUBPIXEL_MASK 0xffff
@@ -36,9 +38,9 @@
* creating the primary plane. However drivers that still call
* drm_plane_init() will use this minimal format list as the default.
*/
-const static uint32_t safe_modeset_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
+static const uint32_t safe_modeset_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
};
/*
@@ -54,6 +56,13 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
struct drm_connector *connector;
int count = 0;
+ /*
+ * Note: Once we change the plane hooks to more fine-grained locking we
+ * need to grab the connection_mutex here to be able to make these
+ * checks.
+ */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
@@ -66,6 +75,79 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
}
/**
+ * drm_plane_helper_check_update() - Check plane update for validity
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @src: source coordinates in 16.16 fixed point
+ * @dest: integer destination coordinates
+ * @clip: integer clipping coordinates
+ * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
+ * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
+ * @can_position: is it legal to position the plane such that it
+ * doesn't cover the entire crtc? This will generally
+ * only be false for primary planes.
+ * @can_update_disabled: can the plane be updated while the crtc
+ * is disabled?
+ * @visible: output parameter indicating whether plane is still visible after
+ * clipping
+ *
+ * Checks that a desired plane update is valid. Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * Returns:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
+{
+ int hscale, vscale;
+
+ if (!crtc->enabled && !can_update_disabled) {
+ DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ /* Check scaling */
+ hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
+ if (hscale < 0 || vscale < 0) {
+ DRM_DEBUG_KMS("Invalid scaling of plane\n");
+ return -ERANGE;
+ }
+
+ *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+ if (!*visible)
+ /*
+ * Plane isn't visible; some drivers can handle this
+ * so we just return success here. Drivers that can't
+ * (including those that use the primary plane helper's
+ * update function) will return an error from their
+ * update_plane handler.
+ */
+ return 0;
+
+ if (!can_position && !drm_rect_equals(dest, clip)) {
+ DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_helper_check_update);
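A driver with its own update_plane implementation can reuse the check like so; this sketch mirrors the rect setup of drm_primary_helper_update() below, and the final hardware programming step is a placeholder:

        struct drm_rect src = {
                .x1 = src_x, .y1 = src_y,
                .x2 = src_x + src_w, .y2 = src_y + src_h,
        };
        struct drm_rect dest = {
                .x1 = crtc_x, .y1 = crtc_y,
                .x2 = crtc_x + crtc_w, .y2 = crtc_y + crtc_h,
        };
        const struct drm_rect clip = {
                .x2 = crtc->mode.hdisplay, .y2 = crtc->mode.vdisplay,
        };
        bool visible;
        int ret;

        ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            true, false, &visible);
        if (ret)
                return ret;
        if (!visible)
                return plane->funcs->disable_plane(plane);

        /* ... program the overlay from the clipped src/dest rectangles ... */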
+
+/**
* drm_primary_helper_update() - Helper for primary plane update
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
@@ -113,56 +195,42 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
.x = src_x >> 16,
.y = src_y >> 16,
};
+ struct drm_rect src = {
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
struct drm_rect dest = {
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
- struct drm_rect clip = {
+ const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
struct drm_connector **connector_list;
- struct drm_framebuffer *tmpfb;
int num_connectors, ret;
+ bool visible;
- if (!crtc->enabled) {
- DRM_DEBUG_KMS("Cannot update primary plane of a disabled CRTC.\n");
- return -EINVAL;
- }
-
- /* Disallow subpixel positioning */
- if ((src_x | src_y | src_w | src_h) & SUBPIXEL_MASK) {
- DRM_DEBUG_KMS("Primary plane does not support subpixel positioning\n");
- return -EINVAL;
- }
-
- /* Primary planes are locked to their owning CRTC */
- if (plane->possible_crtcs != drm_crtc_mask(crtc)) {
- DRM_DEBUG_KMS("Cannot change primary plane CRTC\n");
- return -EINVAL;
- }
-
- /* Disallow scaling */
- if (crtc_w != src_w || crtc_h != src_h) {
- DRM_DEBUG_KMS("Can't scale primary plane\n");
- return -EINVAL;
- }
-
- /* Make sure primary plane covers entire CRTC */
- drm_rect_intersect(&dest, &clip);
- if (dest.x1 != 0 || dest.y1 != 0 ||
- dest.x2 != crtc->mode.hdisplay || dest.y2 != crtc->mode.vdisplay) {
- DRM_DEBUG_KMS("Primary plane must cover entire CRTC\n");
- return -EINVAL;
- }
-
- /* Framebuffer must be big enough to cover entire plane */
- ret = drm_crtc_check_viewport(crtc, crtc_x, crtc_y, &crtc->mode, fb);
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, false, &visible);
if (ret)
return ret;
+ if (!visible)
+ /*
+ * Primary plane isn't visible. Note that unless a driver
+ * provides their own disable function, this will just
+ * wind up returning -EINVAL to userspace.
+ */
+ return plane->funcs->disable_plane(plane);
+
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
@@ -176,21 +244,14 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
set.num_connectors = num_connectors;
/*
- * set_config() adjusts crtc->primary->fb; however the DRM setplane
- * code that called us expects to handle the framebuffer update and
- * reference counting; save and restore the current fb before
- * calling it.
- *
- * N.B., we call set_config() directly here rather than using
+ * We call set_config() directly here rather than using
* drm_mode_set_config_internal. We're reprogramming the same
* connectors that were already in use, so we shouldn't need the extra
* cross-CRTC fb refcounting to accommodate stealing connectors.
* drm_mode_setplane() already handles the basic refcounting for the
* framebuffers involved in this operation.
*/
- tmpfb = plane->fb;
ret = crtc->funcs->set_config(&set);
- plane->fb = tmpfb;
kfree(connector_list);
return ret;
@@ -232,7 +293,6 @@ EXPORT_SYMBOL(drm_primary_helper_disable);
*/
void drm_primary_helper_destroy(struct drm_plane *plane)
{
- plane->funcs->disable_plane(plane);
drm_plane_cleanup(plane);
kfree(plane);
}
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 319ff5385601..d5b76f148c12 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,16 +68,6 @@ err_free:
return ret;
}
-static int drm_platform_get_irq(struct drm_device *dev)
-{
- return platform_get_irq(dev->platformdev, 0);
-}
-
-static const char *drm_platform_get_name(struct drm_device *dev)
-{
- return dev->platformdev->name;
-}
-
static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret, id;
@@ -106,46 +96,30 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
goto err;
}
- dev->devname =
- kmalloc(strlen(dev->platformdev->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
- master->unique);
return 0;
err:
return ret;
}
static struct drm_bus drm_platform_bus = {
- .bus_type = DRIVER_BUS_PLATFORM,
- .get_irq = drm_platform_get_irq,
- .get_name = drm_platform_get_name,
.set_busid = drm_platform_set_busid,
};
/**
- * Platform device initialization. Called direct from modules.
+ * drm_platform_init - Register a platform device with the DRM subsystem
+ * @driver: DRM device driver
+ * @platform_device: platform device to register
*
- * \return zero on success or a negative number on failure.
- *
- * Initializes a drm_device structures,registering the
- * stubs
+ * Registers the specified DRM device driver and platform device with the DRM
+ * subsystem, initializing a drm_device structure and calling the driver's
+ * .load() function.
*
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
+ * Return: 0 on success or a negative error code on failure.
*/
-
int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
DRM_DEBUG("\n");
- driver->kdriver.platform_device = platform_device;
driver->bus = &drm_platform_bus;
return drm_get_platform_dev(platform_device, driver);
}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index e70f54d4a581..d22676b89cbb 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,26 +82,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
return;
}
-/**
- * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @connector: connector to probe
- * @maxX: max width for modes
- * @maxY: max height for modes
- *
- * Based on the helper callbacks implemented by @connector try to detect all
- * valid modes. Modes will first be added to the connector's probed_modes list,
- * then culled (based on validity and the @maxX, @maxY parameters) and put into
- * the normal modes list.
- *
- * Intended to be use as a generic implementation of the ->fill_modes()
- * @connector vfunc for drivers that use the crtc helpers for output mode
- * filtering and detection.
- *
- * Returns:
- * The number of modes found on @connector.
- */
-int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
+static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
@@ -114,7 +96,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
/* set all modes to the unverified state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
@@ -138,7 +120,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
drm_mode_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
@@ -155,7 +137,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (count == 0)
goto prune;
- drm_mode_connector_list_update(connector);
+ drm_mode_connector_list_update(connector, merge_type_bits);
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
@@ -169,7 +151,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
- if (mode->status == MODE_OK)
+ if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
}
@@ -186,7 +168,7 @@ prune:
drm_mode_sort(&connector->modes);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
- drm_get_connector_name(connector));
+ connector->name);
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
@@ -194,9 +176,49 @@ prune:
return count;
}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
+ *
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
+ *
+ * Returns:
+ * The number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
+}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
/**
+ * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * This operates like drm_helper_probe_single_connector_modes except it
+ * replaces the mode bits instead of merging them for preferred modes.
+ */
+int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
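
As the kerneldoc above notes, both helpers are meant to back the connector's ->fill_modes() vfunc; wiring the merging variant in looks roughly like this (the "foo" connector funcs and detect callback are hypothetical):

	/* Illustrative sketch only -- "foo" names are hypothetical. */
	static const struct drm_connector_funcs foo_connector_funcs = {
		.dpms = drm_helper_connector_dpms,
		.detect = foo_connector_detect, /* assumed driver callback */
		.fill_modes = drm_helper_probe_single_connector_modes,
		.destroy = drm_connector_cleanup,
	};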
+
+/**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed
*
@@ -264,7 +286,7 @@ static void output_poll_execute(struct work_struct *work)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
"status updated from %s to %s\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
old, new);
changed = true;
@@ -409,7 +431,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
if (old_status != connector->status)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 4c24c3ac1efa..14d16464000a 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -1,16 +1,11 @@
-/**
- * \file drm_stub.h
- * Stub support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- */
-
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -128,7 +123,10 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
kref_init(&master->refcount);
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
- drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+ if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
+ kfree(master);
+ return NULL;
+ }
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
@@ -166,9 +164,6 @@ static void drm_master_destroy(struct kref *kref)
master->unique_len = 0;
}
- kfree(dev->devname);
- dev->devname = NULL;
-
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
@@ -294,6 +289,7 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
slot = drm_minor_get_slot(dev, type);
if (*slot) {
+ drm_mode_group_destroy(&(*slot)->mode_group);
kfree(*slot);
*slot = NULL;
}
@@ -424,11 +420,15 @@ void drm_minor_release(struct drm_minor *minor)
}
/**
- * Called via drm_exit() at module unload time or when pci device is
- * unplugged.
+ * drm_put_dev - Unregister and release a DRM device
+ * @dev: DRM device
*
- * Cleans up all DRM device, calling drm_lastclose().
+ * Called at module unload time or when a PCI device is unplugged.
*
+ * Use of this function is discouraged. It will eventually go away completely.
+ * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
+ *
+ * Cleans up the DRM device, calling drm_lastclose().
*/
void drm_put_dev(struct drm_device *dev)
{
@@ -535,7 +535,7 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
- * drm_dev_alloc - Allocate new drm device
+ * drm_dev_alloc - Allocate new DRM device
* @driver: DRM driver to allocate device for
* @parent: Parent device object
*
@@ -569,7 +569,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
- spin_lock_init(&dev->count_lock);
+ spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
@@ -648,9 +648,8 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_minor_free(dev, DRM_MINOR_CONTROL);
- kfree(dev->devname);
-
mutex_destroy(&dev->master_mutex);
+ kfree(dev->unique);
kfree(dev);
}
@@ -690,6 +689,7 @@ EXPORT_SYMBOL(drm_dev_unref);
/**
* drm_dev_register - Register DRM device
* @dev: Device to register
+ * @flags: Flags passed to the driver's .load() function
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
@@ -778,3 +778,28 @@ void drm_dev_unregister(struct drm_device *dev)
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
+
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @fmt: format string for unique name
+ *
+ * Sets the unique name of a DRM device using the specified format string and
+ * a variable list of arguments. Drivers can use this at driver probe time if
+ * the unique name of the devices they drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
+{
+ va_list ap;
+
+ kfree(dev->unique);
+
+ va_start(ap, fmt);
+ dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
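
A driver whose bus name is static would typically call this right after allocating the device; a sketch (the "foo" driver and parent device are assumed):

	/* Illustrative sketch only. */
	ddev = drm_dev_alloc(&foo_drm_driver, parent);
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_set_unique(ddev, "%s", dev_name(parent));
	if (ret < 0)
		goto err_unref; /* drm_dev_unref(ddev) in the error path */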
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index c22c3097c3e8..369b26278e76 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -380,9 +380,9 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
connector->kdev = device_create(drm_class, dev->primary->kdev,
0, connector, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
+ dev->primary->index, connector->name);
DRM_DEBUG("adding \"%s\" to sysfs\n",
- drm_get_connector_name(connector));
+ connector->name);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
@@ -460,7 +460,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
- drm_get_connector_name(connector));
+ connector->name);
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
device_remove_file(connector->kdev, &connector_attrs[i]);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index c3406aad2944..f2fe94aab901 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -36,16 +36,6 @@ err_free:
}
EXPORT_SYMBOL(drm_get_usb_dev);
-static int drm_usb_get_irq(struct drm_device *dev)
-{
- return 0;
-}
-
-static const char *drm_usb_get_name(struct drm_device *dev)
-{
- return "USB";
-}
-
static int drm_usb_set_busid(struct drm_device *dev,
struct drm_master *master)
{
@@ -53,18 +43,24 @@ static int drm_usb_set_busid(struct drm_device *dev,
}
static struct drm_bus drm_usb_bus = {
- .bus_type = DRIVER_BUS_USB,
- .get_irq = drm_usb_get_irq,
- .get_name = drm_usb_get_name,
.set_busid = drm_usb_set_busid,
};
-
+
+/**
+ * drm_usb_init - Register matching USB devices with the DRM subsystem
+ * @driver: DRM device driver
+ * @udriver: USB device driver
+ *
+ * Registers one or more devices matched by a USB driver with the DRM
+ * subsystem.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
{
int res;
DRM_DEBUG("\n");
- driver->kdriver.usb = udriver;
driver->bus = &drm_usb_bus;
res = usb_register(udriver);
@@ -72,6 +68,14 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
}
EXPORT_SYMBOL(drm_usb_init);
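
A USB DRM driver would call these from its module init/exit with its usb_driver; a rough sketch (the "foo" driver structures are hypothetical):

	/* Illustrative sketch only -- "foo" names are hypothetical. */
	static struct usb_driver foo_usb_driver = {
		.name = "foo_usb",
		.probe = foo_usb_probe,           /* assumed driver callback */
		.disconnect = foo_usb_disconnect, /* assumed driver callback */
		.id_table = foo_id_table,
	};

	static int __init foo_init(void)
	{
		return drm_usb_init(&foo_drm_driver, &foo_usb_driver);
	}

	static void __exit foo_exit(void)
	{
		drm_usb_exit(&foo_drm_driver, &foo_usb_driver);
	}
	module_init(foo_init);
	module_exit(foo_exit);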
+/**
+ * drm_usb_exit - Unregister matching USB devices from the DRM subsystem
+ * @driver: DRM device driver
+ * @udriver: USB device driver
+ *
+ * Unregisters one or more devices matched by a USB driver from the DRM
+ * subsystem.
+ */
void drm_usb_exit(struct drm_driver *driver,
struct usb_driver *udriver)
{
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 5bf5bca94f56..178d2a9672a8 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -26,14 +26,14 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS && !FB_S3C
select FB_MODE_HELPERS
help
Choose this option if you want to use Exynos FIMD for DRM.
config DRM_EXYNOS_DPI
bool "EXYNOS DRM parallel output support"
- depends on DRM_EXYNOS
+ depends on DRM_EXYNOS_FIMD
select DRM_PANEL
default n
help
@@ -41,7 +41,7 @@ config DRM_EXYNOS_DPI
config DRM_EXYNOS_DSI
bool "EXYNOS DRM MIPI-DSI driver support"
- depends on DRM_EXYNOS
+ depends on DRM_EXYNOS_FIMD
select DRM_MIPI_DSI
select DRM_PANEL
default n
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
- depends on DRM_EXYNOS && ARCH_EXYNOS
+ depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
help
This enables support for DP device.
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
deleted file mode 100644
index 6a8c84e7c839..000000000000
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Authors:
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- * Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <drm/drmP.h>
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/of.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_hdmi.h"
-
-static int s5p_ddc_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
-{
- hdmi_attach_ddc_client(client);
-
- dev_info(&client->adapter->dev,
- "attached %s into i2c adapter successfully\n",
- client->name);
-
- return 0;
-}
-
-static int s5p_ddc_remove(struct i2c_client *client)
-{
- dev_info(&client->adapter->dev,
- "detached %s from i2c adapter successfully\n",
- client->name);
-
- return 0;
-}
-
-static struct of_device_id hdmiddc_match_types[] = {
- {
- .compatible = "samsung,exynos5-hdmiddc",
- }, {
- .compatible = "samsung,exynos4210-hdmiddc",
- }, {
- /* end node */
- }
-};
-
-struct i2c_driver ddc_driver = {
- .driver = {
- .name = "exynos-hdmiddc",
- .owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
- },
- .probe = s5p_ddc_probe,
- .remove = s5p_ddc_remove,
- .command = NULL,
-};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index aed533bbfd31..a8ffc8c1477b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -18,6 +18,9 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/component.h>
#include <linux/phy/phy.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -141,15 +144,15 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
return -EIO;
}
- exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_TEST_REQUEST,
+ exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
&test_vector);
- if (test_vector & DPCD_TEST_EDID_READ) {
+ if (test_vector & DP_TEST_LINK_EDID_READ) {
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TEST_EDID_CHECKSUM,
+ DP_TEST_EDID_CHECKSUM,
edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TEST_RESPONSE,
- DPCD_TEST_EDID_CHECKSUM_WRITE);
+ DP_TEST_RESPONSE,
+ DP_TEST_EDID_CHECKSUM_WRITE);
}
} else {
dev_info(dp->dev, "EDID data does not include any extensions.\n");
@@ -171,15 +174,15 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
}
exynos_dp_read_byte_from_dpcd(dp,
- DPCD_ADDR_TEST_REQUEST,
+ DP_TEST_REQUEST,
&test_vector);
- if (test_vector & DPCD_TEST_EDID_READ) {
+ if (test_vector & DP_TEST_LINK_EDID_READ) {
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TEST_EDID_CHECKSUM,
+ DP_TEST_EDID_CHECKSUM,
edid[EDID_CHECKSUM]);
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TEST_RESPONSE,
- DPCD_TEST_EDID_CHECKSUM_WRITE);
+ DP_TEST_RESPONSE,
+ DP_TEST_EDID_CHECKSUM_WRITE);
}
}
@@ -193,8 +196,8 @@ static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
int i;
int retval;
- /* Read DPCD DPCD_ADDR_DPCD_REV~RECEIVE_PORT1_CAP_1 */
- retval = exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_DPCD_REV,
+ /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
+ retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV,
12, buf);
if (retval)
return retval;
@@ -214,14 +217,14 @@ static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp,
{
u8 data;
- exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET, &data);
+ exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
if (enable)
- exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET,
- DPCD_ENHANCED_FRAME_EN |
+ exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+ DP_LANE_COUNT_ENHANCED_FRAME_EN |
DPCD_LANE_COUNT_SET(data));
else
- exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET,
+ exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
DPCD_LANE_COUNT_SET(data));
}
@@ -230,7 +233,7 @@ static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp)
u8 data;
int retval;
- exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LANE_COUNT, &data);
+ exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
retval = DPCD_ENHANCED_FRAME_CAP(data);
return retval;
@@ -250,8 +253,8 @@ static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp)
exynos_dp_set_training_pattern(dp, DP_NONE);
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_TRAINING_PATTERN_DISABLED);
+ DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
}
static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
@@ -295,7 +298,7 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
/* Setup RX configuration */
buf[0] = dp->link_train.link_rate;
buf[1] = dp->link_train.lane_count;
- retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET,
2, buf);
if (retval)
return retval;
@@ -322,16 +325,16 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
/* Set RX training pattern */
retval = exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_1);
+ DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
if (retval)
return retval;
for (lane = 0; lane < lane_count; lane++)
- buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
- DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0;
+ buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
+ DP_TRAIN_VOLTAGE_SWING_400;
- retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
lane_count, buf);
return retval;
@@ -352,7 +355,7 @@ static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
for (lane = 0; lane < lane_count; lane++) {
lane_status = exynos_dp_get_lane_status(link_status, lane);
- if ((lane_status & DPCD_LANE_CR_DONE) == 0)
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
return -EINVAL;
}
return 0;
@@ -364,13 +367,13 @@ static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
int lane;
u8 lane_status;
- if ((link_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
+ if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
return -EINVAL;
for (lane = 0; lane < lane_count; lane++) {
lane_status = exynos_dp_get_lane_status(link_status, lane);
- lane_status &= DPCD_CHANNEL_EQ_BITS;
- if (lane_status != DPCD_CHANNEL_EQ_BITS)
+ lane_status &= DP_CHANNEL_EQ_BITS;
+ if (lane_status != DP_CHANNEL_EQ_BITS)
return -EINVAL;
}
@@ -468,9 +471,9 @@ static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
DPCD_PRE_EMPHASIS_SET(pre_emphasis);
if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
+ training_lane |= DP_TRAIN_MAX_SWING_REACHED;
if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+ training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
dp->link_train.training_lane[lane] = training_lane;
}
@@ -487,12 +490,12 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
lane_count = dp->link_train.lane_count;
retval = exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+ DP_LANE0_1_STATUS, 2, link_status);
if (retval)
return retval;
retval = exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
if (retval)
return retval;
@@ -501,9 +504,9 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
retval = exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_SCRAMBLING_DISABLED |
- DPCD_TRAINING_PATTERN_2);
+ DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
if (retval)
return retval;
@@ -543,7 +546,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
dp->link_train.training_lane[lane], lane);
retval = exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET, lane_count,
+ DP_TRAINING_LANE0_SET, lane_count,
dp->link_train.training_lane);
if (retval)
return retval;
@@ -562,7 +565,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
lane_count = dp->link_train.lane_count;
retval = exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+ DP_LANE0_1_STATUS, 2, link_status);
if (retval)
return retval;
@@ -572,12 +575,12 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
}
retval = exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
if (retval)
return retval;
retval = exynos_dp_read_byte_from_dpcd(dp,
- DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED, &link_align);
+ DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
if (retval)
return retval;
@@ -619,7 +622,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
exynos_dp_set_lane_link_training(dp,
dp->link_train.training_lane[lane], lane);
- retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
lane_count, dp->link_train.training_lane);
return retval;
@@ -634,7 +637,7 @@ static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
* For DP rev.1.1, Maximum link rate of Main Link lanes
* 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
*/
- exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LINK_RATE, &data);
+ exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
*bandwidth = data;
}
@@ -647,7 +650,7 @@ static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
* For DP rev.1.1, Maximum number of Main Link lanes
* 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
*/
- exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LANE_COUNT, &data);
+ exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
*lane_count = DPCD_MAX_LANE_COUNT(data);
}
@@ -819,20 +822,20 @@ static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable)
exynos_dp_enable_scrambling(dp);
exynos_dp_read_byte_from_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_SET,
&data);
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- (u8)(data & ~DPCD_SCRAMBLING_DISABLED));
+ DP_TRAINING_PATTERN_SET,
+ (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
} else {
exynos_dp_disable_scrambling(dp);
exynos_dp_read_byte_from_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_SET,
&data);
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- (u8)(data | DPCD_SCRAMBLING_DISABLED));
+ DP_TRAINING_PATTERN_SET,
+ (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
}
}
@@ -949,12 +952,6 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
return 1;
}
-static int exynos_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *exynos_dp_best_encoder(
struct drm_connector *connector)
{
@@ -965,20 +962,9 @@ static struct drm_encoder *exynos_dp_best_encoder(
static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
.get_modes = exynos_dp_get_modes,
- .mode_valid = exynos_dp_mode_valid,
.best_encoder = exynos_dp_best_encoder,
};
-static int exynos_dp_initialize(struct exynos_drm_display *display,
- struct drm_device *drm_dev)
-{
- struct exynos_dp_device *dp = display->ctx;
-
- dp->drm_dev = drm_dev;
-
- return 0;
-}
-
static bool find_bridge(const char *compat, struct bridge_init *bridge)
{
bridge->client = NULL;
@@ -1101,12 +1087,11 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
break;
default:
break;
- };
+ }
dp->dpms_mode = mode;
}
static struct exynos_drm_display_ops exynos_dp_display_ops = {
- .initialize = exynos_dp_initialize,
.create_connector = exynos_dp_create_connector,
.dpms = exynos_dp_dpms,
};
@@ -1123,10 +1108,8 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
dp_video_config = devm_kzalloc(dev,
sizeof(*dp_video_config), GFP_KERNEL);
- if (!dp_video_config) {
- dev_err(dev, "memory allocation for video config failed\n");
+ if (!dp_video_config)
return ERR_PTR(-ENOMEM);
- }
dp_video_config->h_sync_polarity =
of_property_read_bool(dp_node, "hsync-active-high");
@@ -1185,10 +1168,7 @@ static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
if (!dp_phy_node) {
dp->phy = devm_phy_get(dp->dev, "dp");
- if (IS_ERR(dp->phy))
- return PTR_ERR(dp->phy);
- else
- return 0;
+ return PTR_ERR_OR_ZERO(dp->phy);
}
if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
@@ -1230,19 +1210,20 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
return 0;
}
-static int exynos_dp_probe(struct platform_device *pdev)
+static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm_dev = data;
struct resource *res;
struct exynos_dp_device *dp;
+ unsigned int irq_flags;
int ret = 0;
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
- if (!dp) {
- dev_err(&pdev->dev, "no memory for device data\n");
+ if (!dp)
return -ENOMEM;
- }
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1273,7 +1254,30 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->reg_base))
return PTR_ERR(dp->reg_base);
- dp->irq = platform_get_irq(pdev, 0);
+ dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ /*
+ * Set up the hotplug GPIO from the device tree as an interrupt.
+ * Simply specifying a different interrupt in the device tree
+ * doesn't work since we handle hotplug rather differently when
+ * using a GPIO. We also need the actual GPIO specifier so
+ * that we can get the current state of the GPIO.
+ */
+ ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
+ "hpd_gpio");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get hpd gpio\n");
+ return ret;
+ }
+ dp->irq = gpio_to_irq(dp->hpd_gpio);
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ } else {
+ dp->hpd_gpio = -ENODEV;
+ dp->irq = platform_get_irq(pdev, 0);
+ irq_flags = 0;
+ }
+
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
return -ENODEV;
@@ -1285,28 +1289,61 @@ static int exynos_dp_probe(struct platform_device *pdev)
exynos_dp_init_dp(dp);
- ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
- "exynos-dp", dp);
+ ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
+ irq_flags, "exynos-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
disable_irq(dp->irq);
+ dp->drm_dev = drm_dev;
exynos_dp_display.ctx = dp;
platform_set_drvdata(pdev, &exynos_dp_display);
- exynos_drm_display_register(&exynos_dp_display);
- return 0;
+ return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
}
-static int exynos_dp_remove(struct platform_device *pdev)
+static void exynos_dp_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dp_device *dp = display->ctx;
+ struct drm_encoder *encoder = dp->encoder;
exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
- exynos_drm_display_unregister(&exynos_dp_display);
+
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&dp->connector);
+}
+
+static const struct component_ops exynos_dp_ops = {
+ .bind = exynos_dp_bind,
+ .unbind = exynos_dp_unbind,
+};
+
+static int exynos_dp_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ exynos_dp_display.type);
+ if (ret)
+ return ret;
+
+ ret = component_add(&pdev->dev, &exynos_dp_ops);
+ if (ret)
+ exynos_drm_component_del(&pdev->dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return ret;
+}
+
+static int exynos_dp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &exynos_dp_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index d6a900d4ee40..02cc4f9ab903 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -14,6 +14,7 @@
#define _EXYNOS_DP_CORE_H
#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
#include <drm/exynos_drm.h>
#define DP_TIMEOUT_LOOP_COUNT 100
@@ -159,6 +160,7 @@ struct exynos_dp_device {
struct work_struct hotplug_work;
struct phy *phy;
int dpms_mode;
+ int hpd_gpio;
struct exynos_drm_panel_info panel;
};
@@ -261,69 +263,17 @@ void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
#define EDID_EXTENSION_FLAG 0x7e
#define EDID_CHECKSUM 0x7f
-/* Definition for DPCD Register */
-#define DPCD_ADDR_DPCD_REV 0x0000
-#define DPCD_ADDR_MAX_LINK_RATE 0x0001
-#define DPCD_ADDR_MAX_LANE_COUNT 0x0002
-#define DPCD_ADDR_LINK_BW_SET 0x0100
-#define DPCD_ADDR_LANE_COUNT_SET 0x0101
-#define DPCD_ADDR_TRAINING_PATTERN_SET 0x0102
-#define DPCD_ADDR_TRAINING_LANE0_SET 0x0103
-#define DPCD_ADDR_LANE0_1_STATUS 0x0202
-#define DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED 0x0204
-#define DPCD_ADDR_ADJUST_REQUEST_LANE0_1 0x0206
-#define DPCD_ADDR_ADJUST_REQUEST_LANE2_3 0x0207
-#define DPCD_ADDR_TEST_REQUEST 0x0218
-#define DPCD_ADDR_TEST_RESPONSE 0x0260
-#define DPCD_ADDR_TEST_EDID_CHECKSUM 0x0261
-#define DPCD_ADDR_SINK_POWER_STATE 0x0600
-
-/* DPCD_ADDR_MAX_LANE_COUNT */
+/* DP_MAX_LANE_COUNT */
#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
-/* DPCD_ADDR_LANE_COUNT_SET */
-#define DPCD_ENHANCED_FRAME_EN (0x1 << 7)
+/* DP_LANE_COUNT_SET */
#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f)
-/* DPCD_ADDR_TRAINING_PATTERN_SET */
-#define DPCD_SCRAMBLING_DISABLED (0x1 << 5)
-#define DPCD_SCRAMBLING_ENABLED (0x0 << 5)
-#define DPCD_TRAINING_PATTERN_2 (0x2 << 0)
-#define DPCD_TRAINING_PATTERN_1 (0x1 << 0)
-#define DPCD_TRAINING_PATTERN_DISABLED (0x0 << 0)
-
-/* DPCD_ADDR_TRAINING_LANE0_SET */
-#define DPCD_MAX_PRE_EMPHASIS_REACHED (0x1 << 5)
+/* DP_TRAINING_LANE0_SET */
#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3)
#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3)
-#define DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 (0x0 << 3)
-#define DPCD_MAX_SWING_REACHED (0x1 << 2)
#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0)
#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3)
-#define DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0 (0x0 << 0)
-
-/* DPCD_ADDR_LANE0_1_STATUS */
-#define DPCD_LANE_SYMBOL_LOCKED (0x1 << 2)
-#define DPCD_LANE_CHANNEL_EQ_DONE (0x1 << 1)
-#define DPCD_LANE_CR_DONE (0x1 << 0)
-#define DPCD_CHANNEL_EQ_BITS (DPCD_LANE_CR_DONE| \
- DPCD_LANE_CHANNEL_EQ_DONE|\
- DPCD_LANE_SYMBOL_LOCKED)
-
-/* DPCD_ADDR_LANE_ALIGN__STATUS_UPDATED */
-#define DPCD_LINK_STATUS_UPDATED (0x1 << 7)
-#define DPCD_DOWNSTREAM_PORT_STATUS_CHANGED (0x1 << 6)
-#define DPCD_INTERLANE_ALIGN_DONE (0x1 << 0)
-
-/* DPCD_ADDR_TEST_REQUEST */
-#define DPCD_TEST_EDID_READ (0x1 << 2)
-
-/* DPCD_ADDR_TEST_RESPONSE */
-#define DPCD_TEST_EDID_CHECKSUM_WRITE (0x1 << 2)
-
-/* DPCD_ADDR_SINK_POWER_STATE */
-#define DPCD_SET_POWER_STATE_D0 (0x1 << 0)
-#define DPCD_SET_POWER_STATE_D4 (0x2 << 0)
#endif /* _EXYNOS_DP_CORE_H */
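
The local DPCD_ADDR_* constants removed above are replaced throughout this patch by the generic definitions from <drm/drm_dp_helper.h>; for example (addresses as defined by that header):

	#include <drm/drm_dp_helper.h>

	/* DPCD_ADDR_MAX_LANE_COUNT       -> DP_MAX_LANE_COUNT       (0x002) */
	/* DPCD_ADDR_LINK_BW_SET          -> DP_LINK_BW_SET          (0x100) */
	/* DPCD_ADDR_TRAINING_PATTERN_SET -> DP_TRAINING_PATTERN_SET (0x102) */
	/* DPCD_SCRAMBLING_DISABLED       -> DP_LINK_SCRAMBLING_DISABLE */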
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
index b70da5052ff0..c1f87a2a9284 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_reg.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_reg.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
@@ -326,6 +327,9 @@ void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
{
u32 reg;
+ if (gpio_is_valid(dp->hpd_gpio))
+ return;
+
reg = HOTPLUG_CHG | HPD_LOST | PLUG;
writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
@@ -337,6 +341,9 @@ void exynos_dp_init_hpd(struct exynos_dp_device *dp)
{
u32 reg;
+ if (gpio_is_valid(dp->hpd_gpio))
+ return;
+
exynos_dp_clear_hotplug_interrupts(dp);
reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
@@ -348,19 +355,27 @@ enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
{
u32 reg;
- /* Parse hotplug interrupt status register */
- reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ reg = gpio_get_value(dp->hpd_gpio);
+ if (reg)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+ else
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+ } else {
+ /* Parse hotplug interrupt status register */
+ reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
- if (reg & PLUG)
- return DP_IRQ_TYPE_HP_CABLE_IN;
+ if (reg & PLUG)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
- if (reg & HPD_LOST)
- return DP_IRQ_TYPE_HP_CABLE_OUT;
+ if (reg & HPD_LOST)
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
- if (reg & HOTPLUG_CHG)
- return DP_IRQ_TYPE_HP_CHANGE;
+ if (reg & HOTPLUG_CHG)
+ return DP_IRQ_TYPE_HP_CHANGE;
- return DP_IRQ_TYPE_UNKNOWN;
+ return DP_IRQ_TYPE_UNKNOWN;
+ }
}
void exynos_dp_reset_aux(struct exynos_dp_device *dp)
@@ -386,7 +401,7 @@ void exynos_dp_init_aux(struct exynos_dp_device *dp)
/* Disable AUX transaction H/W retry */
reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)|
AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL) ;
+ writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);
/* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */
reg = DEFER_CTRL_EN | DEFER_COUNT(1);
@@ -402,9 +417,14 @@ int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
{
u32 reg;
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
- if (reg & HPD_STATUS)
- return 0;
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ if (gpio_get_value(dp->hpd_gpio))
+ return 0;
+ } else {
+ reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
+ if (reg & HPD_STATUS)
+ return 0;
+ }
return -EINVAL;
}
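
With the GPIO path in place, callers keep using the same API regardless of the HPD source; e.g. a plug-detect wait could poll it like this (sketch, bound and delays arbitrary):

	/* Illustrative sketch only. */
	int timeout = 100; /* arbitrary bound */

	while (exynos_dp_get_plug_in_status(dp) != 0 && --timeout)
		usleep_range(10000, 11000);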
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 0e9e06ce36b8..4c9f972eaa07 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -19,21 +19,19 @@
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
-static LIST_HEAD(exynos_drm_manager_list);
-static LIST_HEAD(exynos_drm_display_list);
-static int exynos_drm_create_enc_conn(struct drm_device *dev,
+int exynos_drm_create_enc_conn(struct drm_device *dev,
struct exynos_drm_display *display)
{
struct drm_encoder *encoder;
- struct exynos_drm_manager *manager;
int ret;
unsigned long possible_crtcs = 0;
- /* Find possible crtcs for this display */
- list_for_each_entry(manager, &exynos_drm_manager_list, list)
- if (manager->type == display->type)
- possible_crtcs |= 1 << manager->pipe;
+ ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
+ if (ret < 0)
+ return ret;
+
+ possible_crtcs |= 1 << ret;
/* create and initialize a encoder for this sub driver. */
encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
@@ -57,127 +55,29 @@ err_destroy_encoder:
return ret;
}
-static int exynos_drm_subdrv_probe(struct drm_device *dev,
- struct exynos_drm_subdrv *subdrv)
-{
- if (subdrv->probe) {
- int ret;
-
- subdrv->drm_dev = dev;
-
- /*
- * this probe callback would be called by sub driver
- * after setting of all resources to this sub driver,
- * such as clock, irq and register map are done or by load()
- * of exynos drm driver.
- *
- * P.S. note that this driver is considered for modularization.
- */
- ret = subdrv->probe(dev, subdrv->dev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void exynos_drm_subdrv_remove(struct drm_device *dev,
- struct exynos_drm_subdrv *subdrv)
-{
- if (subdrv->remove)
- subdrv->remove(dev, subdrv->dev);
-}
-
-int exynos_drm_initialize_managers(struct drm_device *dev)
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
{
- struct exynos_drm_manager *manager, *n;
- int ret, pipe = 0;
-
- list_for_each_entry(manager, &exynos_drm_manager_list, list) {
- if (manager->ops->initialize) {
- ret = manager->ops->initialize(manager, dev, pipe);
- if (ret) {
- DRM_ERROR("Mgr init [%d] failed with %d\n",
- manager->type, ret);
- goto err;
- }
- }
+ if (!subdrv)
+ return -EINVAL;
- manager->drm_dev = dev;
- manager->pipe = pipe++;
+ list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
- ret = exynos_drm_crtc_create(manager);
- if (ret) {
- DRM_ERROR("CRTC create [%d] failed with %d\n",
- manager->type, ret);
- goto err;
- }
- }
return 0;
-
-err:
- list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list) {
- if (pipe-- > 0)
- exynos_drm_manager_unregister(manager);
- else
- list_del(&manager->list);
- }
- return ret;
-}
-
-void exynos_drm_remove_managers(struct drm_device *dev)
-{
- struct exynos_drm_manager *manager, *n;
-
- list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list)
- exynos_drm_manager_unregister(manager);
}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
-int exynos_drm_initialize_displays(struct drm_device *dev)
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
{
- struct exynos_drm_display *display, *n;
- int ret, initialized = 0;
-
- list_for_each_entry(display, &exynos_drm_display_list, list) {
- if (display->ops->initialize) {
- ret = display->ops->initialize(display, dev);
- if (ret) {
- DRM_ERROR("Display init [%d] failed with %d\n",
- display->type, ret);
- goto err;
- }
- }
+ if (!subdrv)
+ return -EINVAL;
- initialized++;
+ list_del(&subdrv->list);
- ret = exynos_drm_create_enc_conn(dev, display);
- if (ret) {
- DRM_ERROR("Encoder create [%d] failed with %d\n",
- display->type, ret);
- goto err;
- }
- }
return 0;
-
-err:
- list_for_each_entry_safe(display, n, &exynos_drm_display_list, list) {
- if (initialized-- > 0)
- exynos_drm_display_unregister(display);
- else
- list_del(&display->list);
- }
- return ret;
-}
-
-void exynos_drm_remove_displays(struct drm_device *dev)
-{
- struct exynos_drm_display *display, *n;
-
- list_for_each_entry_safe(display, n, &exynos_drm_display_list, list)
- exynos_drm_display_unregister(display);
}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
-int exynos_drm_device_register(struct drm_device *dev)
+int exynos_drm_device_subdrv_probe(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv, *n;
int err;
@@ -186,19 +86,28 @@ int exynos_drm_device_register(struct drm_device *dev)
return -EINVAL;
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
- err = exynos_drm_subdrv_probe(dev, subdrv);
- if (err) {
- DRM_DEBUG("exynos drm subdrv probe failed.\n");
- list_del(&subdrv->list);
- continue;
+ if (subdrv->probe) {
+ subdrv->drm_dev = dev;
+
+ /*
+ * This probe callback is called by the sub driver once
+ * all of its resources, such as clocks, irqs and
+ * register maps, have been set up.
+ */
+ err = subdrv->probe(dev, subdrv->dev);
+ if (err) {
+ DRM_DEBUG("exynos drm subdrv probe failed.\n");
+ list_del(&subdrv->list);
+ continue;
+ }
}
}
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
-int exynos_drm_device_unregister(struct drm_device *dev)
+int exynos_drm_device_subdrv_remove(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv;
@@ -208,66 +117,13 @@ int exynos_drm_device_unregister(struct drm_device *dev)
}
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- exynos_drm_subdrv_remove(dev, subdrv);
+ if (subdrv->remove)
+ subdrv->remove(dev, subdrv->dev);
}
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
-
-int exynos_drm_manager_register(struct exynos_drm_manager *manager)
-{
- BUG_ON(!manager->ops);
- list_add_tail(&manager->list, &exynos_drm_manager_list);
- return 0;
-}
-
-int exynos_drm_manager_unregister(struct exynos_drm_manager *manager)
-{
- if (manager->ops->remove)
- manager->ops->remove(manager);
-
- list_del(&manager->list);
- return 0;
-}
-
-int exynos_drm_display_register(struct exynos_drm_display *display)
-{
- BUG_ON(!display->ops);
- list_add_tail(&display->list, &exynos_drm_display_list);
- return 0;
-}
-
-int exynos_drm_display_unregister(struct exynos_drm_display *display)
-{
- if (display->ops->remove)
- display->ops->remove(display);
-
- list_del(&display->list);
- return 0;
-}
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_del(&subdrv->list);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
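
For reference, a non-KMS sub driver pairs these register/unregister calls with probe/remove callbacks; a minimal sketch (the "foo" callbacks are hypothetical):

	/* Illustrative sketch only -- "foo" names are hypothetical. */
	static struct exynos_drm_subdrv foo_subdrv = {
		.probe = foo_subdrv_probe,   /* run by exynos_drm_device_subdrv_probe() */
		.remove = foo_subdrv_remove, /* run by exynos_drm_device_subdrv_remove() */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		foo_subdrv.dev = &pdev->dev;
		return exynos_drm_subdrv_register(&foo_subdrv);
	}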
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 1ef5ab9c9d51..95c9435d0266 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -368,6 +368,7 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
return -ENOMEM;
}
+ manager->crtc = &exynos_crtc->drm_crtc;
crtc = &exynos_crtc->drm_crtc;
private->crtc[manager->pipe] = crtc;
@@ -491,3 +492,19 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
manager->ops->wait_for_vblank(manager);
}
}
+
+int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+ unsigned int out_type)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+ struct exynos_drm_crtc *exynos_crtc;
+
+ exynos_crtc = to_exynos_crtc(crtc);
+ if (exynos_crtc->manager->type == out_type)
+ return exynos_crtc->manager->pipe;
+ }
+
+ return -EPERM;
+}
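
The returned pipe is folded into an encoder's possible_crtcs mask, as exynos_drm_create_enc_conn() above now does; in isolation (display type chosen as an example):

	/* Illustrative sketch only. */
	int pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
						      EXYNOS_DISPLAY_TYPE_LCD);
	if (pipe < 0)
		return pipe;
	possible_crtcs = 1 << pipe;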
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c27b66cc5d24..9f74b10a8a01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -32,4 +32,8 @@ void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
+/* Returns the pipe number of the crtc device matching out_type. */
+int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+ unsigned int out_type);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 2b09c7c0bfcc..9e530f205ad2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -40,20 +40,10 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- /* panels supported only by boot-loader are always connected */
- if (!ctx->panel_node)
- return connector_status_connected;
-
- if (!ctx->panel) {
- ctx->panel = of_drm_find_panel(ctx->panel_node);
- if (ctx->panel)
- drm_panel_attach(ctx->panel, &ctx->connector);
- }
-
- if (ctx->panel)
- return connector_status_connected;
+ if (ctx->panel && !ctx->panel->connector)
+ drm_panel_attach(ctx->panel, &ctx->connector);
- return connector_status_disconnected;
+ return connector_status_connected;
}
static void exynos_dpi_connector_destroy(struct drm_connector *connector)
@@ -94,12 +84,6 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
return 0;
}
-static int exynos_dpi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *
exynos_dpi_best_encoder(struct drm_connector *connector)
{
@@ -110,7 +94,6 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.get_modes = exynos_dpi_get_modes,
- .mode_valid = exynos_dpi_mode_valid,
.best_encoder = exynos_dpi_best_encoder,
};
@@ -123,10 +106,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
ctx->encoder = encoder;
- if (ctx->panel_node)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- else
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
&exynos_dpi_connector_funcs,
@@ -172,7 +152,7 @@ static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
break;
default:
break;
- };
+ }
ctx->dpms_mode = mode;
}
@@ -294,8 +274,10 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
return -ENOMEM;
ret = of_get_videomode(dn, vm, 0);
- if (ret < 0)
+ if (ret < 0) {
+ devm_kfree(dev, vm);
return ret;
+ }
ctx->vm = vm;
@@ -308,32 +290,58 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
return 0;
}
-int exynos_dpi_probe(struct device *dev)
+struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
{
struct exynos_dpi *ctx;
int ret;
+ ret = exynos_drm_component_add(dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR,
+ exynos_dpi_display.type);
+ if (ret)
+ return ERR_PTR(ret);
+
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ goto err_del_component;
ctx->dev = dev;
exynos_dpi_display.ctx = ctx;
ctx->dpms_mode = DRM_MODE_DPMS_OFF;
ret = exynos_dpi_parse_dt(ctx);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ devm_kfree(dev, ctx);
+ goto err_del_component;
+ }
- exynos_drm_display_register(&exynos_dpi_display);
+ if (ctx->panel_node) {
+ ctx->panel = of_drm_find_panel(ctx->panel_node);
+ if (!ctx->panel) {
+ exynos_drm_component_del(dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ }
- return 0;
+ return &exynos_dpi_display;
+
+err_del_component:
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
+ return NULL;
}
int exynos_dpi_remove(struct device *dev)
{
+ struct drm_encoder *encoder = exynos_dpi_display.encoder;
+ struct exynos_dpi *ctx = exynos_dpi_display.ctx;
+
exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
- exynos_drm_display_unregister(&exynos_dpi_display);
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&ctx->connector);
+
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2d27ba23a6a8..ab7d182063c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc_helper.h>
#include <linux/anon_inodes.h>
+#include <linux/component.h>
#include <drm/exynos_drm.h>
@@ -40,9 +41,19 @@
#define VBLANK_OFF_DELAY 50000
-/* platform device pointer for eynos drm device. */
static struct platform_device *exynos_drm_pdev;
+static DEFINE_MUTEX(drm_component_lock);
+static LIST_HEAD(drm_component_list);
+
+struct component_dev {
+ struct list_head list;
+ struct device *crtc_dev;
+ struct device *conn_dev;
+ enum exynos_drm_output_type out_type;
+ unsigned int dev_type_flag;
+};
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -73,38 +84,21 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
- ret = exynos_drm_initialize_managers(dev);
- if (ret)
- goto err_mode_config_cleanup;
-
for (nr = 0; nr < MAX_PLANE; nr++) {
struct drm_plane *plane;
unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_manager_cleanup;
+ goto err_mode_config_cleanup;
}
- ret = exynos_drm_initialize_displays(dev);
- if (ret)
- goto err_manager_cleanup;
-
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_display_cleanup;
-
- /*
- * probe sub drivers such as display controller and hdmi driver,
- * that were registered at probe() of platform driver
- * to the sub driver and create encoder and connector for them.
- */
- ret = exynos_drm_device_register(dev);
- if (ret)
- goto err_vblank;
+ goto err_mode_config_cleanup;
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
@@ -113,17 +107,25 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
platform_set_drvdata(dev->platformdev, dev);
+ /* Try to bind all sub drivers. */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ goto err_cleanup_vblank;
+
+ /* Probe non-KMS sub drivers and the virtual display driver. */
+ ret = exynos_drm_device_subdrv_probe(dev);
+ if (ret)
+ goto err_unbind_all;
+
/* force connectors detection */
drm_helper_hpd_irq_event(dev);
return 0;
-err_vblank:
+err_unbind_all:
+ component_unbind_all(dev->dev, dev);
+err_cleanup_vblank:
drm_vblank_cleanup(dev);
-err_display_cleanup:
- exynos_drm_remove_displays(dev);
-err_manager_cleanup:
- exynos_drm_remove_managers(dev);
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
@@ -135,17 +137,17 @@ err_free_private:
static int exynos_drm_unload(struct drm_device *dev)
{
+ exynos_drm_device_subdrv_remove(dev);
+
exynos_drm_fbdev_fini(dev);
- exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
- exynos_drm_remove_displays(dev);
- exynos_drm_remove_managers(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
+ component_unbind_all(dev->dev, dev);
dev->dev_private = NULL;
return 0;
@@ -183,9 +185,9 @@ static int exynos_drm_resume(struct drm_device *dev)
if (connector->funcs->dpms)
connector->funcs->dpms(connector, connector->dpms);
}
+ drm_modeset_unlock_all(dev);
drm_helper_resume_force_mode(dev);
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -323,8 +325,7 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
- DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.suspend = exynos_drm_suspend,
@@ -355,27 +356,6 @@ static struct drm_driver exynos_drm_driver = {
.minor = DRIVER_MINOR,
};
-static int exynos_drm_platform_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- return drm_platform_init(&exynos_drm_driver, pdev);
-}
-
-static int exynos_drm_platform_remove(struct platform_device *pdev)
-{
- drm_put_dev(platform_get_drvdata(pdev));
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int exynos_drm_sys_suspend(struct device *dev)
{
@@ -400,196 +380,319 @@ static int exynos_drm_sys_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM_RUNTIME
-static int exynos_drm_runtime_suspend(struct device *dev)
+static const struct dev_pm_ops exynos_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
+};
+
+int exynos_drm_component_add(struct device *dev,
+ enum exynos_drm_device_type dev_type,
+ enum exynos_drm_output_type out_type)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
- pm_message_t message;
+ struct component_dev *cdev;
- if (pm_runtime_suspended(dev))
- return 0;
+ if (dev_type != EXYNOS_DEVICE_TYPE_CRTC &&
+ dev_type != EXYNOS_DEVICE_TYPE_CONNECTOR) {
+ DRM_ERROR("invalid device type.\n");
+ return -EINVAL;
+ }
- message.event = PM_EVENT_SUSPEND;
- return exynos_drm_suspend(drm_dev, message);
+ mutex_lock(&drm_component_lock);
+
+ /*
+ * Check whether a component for this output type has already been
+ * added. A component may carry two device objects, one for the crtc
+ * and one for the encoder/connector, and both drivers must be ready
+ * before the exynos drm core binds them.
+ */
+ list_for_each_entry(cdev, &drm_component_list, list) {
+ if (cdev->out_type == out_type) {
+ /*
+ * If both the crtc and encoder/connector device
+ * objects have already been added, just return.
+ */
+ if (cdev->dev_type_flag == (EXYNOS_DEVICE_TYPE_CRTC |
+ EXYNOS_DEVICE_TYPE_CONNECTOR)) {
+ mutex_unlock(&drm_component_lock);
+ return 0;
+ }
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
+ cdev->crtc_dev = dev;
+ cdev->dev_type_flag |= dev_type;
+ }
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
+ cdev->conn_dev = dev;
+ cdev->dev_type_flag |= dev_type;
+ }
+
+ mutex_unlock(&drm_component_lock);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&drm_component_lock);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CRTC)
+ cdev->crtc_dev = dev;
+ if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR)
+ cdev->conn_dev = dev;
+
+ cdev->out_type = out_type;
+ cdev->dev_type_flag = dev_type;
+
+ mutex_lock(&drm_component_lock);
+ list_add_tail(&cdev->list, &drm_component_list);
+ mutex_unlock(&drm_component_lock);
+
+ return 0;
}
-static int exynos_drm_runtime_resume(struct device *dev)
+void exynos_drm_component_del(struct device *dev,
+ enum exynos_drm_device_type dev_type)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct component_dev *cdev, *next;
- if (!pm_runtime_suspended(dev))
- return 0;
+ mutex_lock(&drm_component_lock);
- return exynos_drm_resume(drm_dev);
+ list_for_each_entry_safe(cdev, next, &drm_component_list, list) {
+ if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
+ if (cdev->crtc_dev == dev) {
+ cdev->crtc_dev = NULL;
+ cdev->dev_type_flag &= ~dev_type;
+ }
+ }
+
+ if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
+ if (cdev->conn_dev == dev) {
+ cdev->conn_dev = NULL;
+ cdev->dev_type_flag &= ~dev_type;
+ }
+ }
+
+ /*
+ * Release the cdev object only when both the crtc and
+ * encoder/connector device objects are NULL.
+ */
+ if (!cdev->crtc_dev && !cdev->conn_dev) {
+ list_del(&cdev->list);
+ kfree(cdev);
+ }
+
+ break;
+ }
+
+ mutex_unlock(&drm_component_lock);
}
-#endif
-static const struct dev_pm_ops exynos_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
- SET_RUNTIME_PM_OPS(exynos_drm_runtime_suspend,
- exynos_drm_runtime_resume, NULL)
-};
+static int compare_of(struct device *dev, void *data)
+{
+ return dev == (struct device *)data;
+}
-static struct platform_driver exynos_drm_platform_driver = {
- .probe = exynos_drm_platform_probe,
- .remove = exynos_drm_platform_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "exynos-drm",
- .pm = &exynos_drm_pm_ops,
- },
+static int exynos_drm_add_components(struct device *dev, struct master *m)
+{
+ struct component_dev *cdev;
+ unsigned int attach_cnt = 0;
+
+ mutex_lock(&drm_component_lock);
+
+ list_for_each_entry(cdev, &drm_component_list, list) {
+ int ret;
+
+ /*
+ * Add components to the master only when both the crtc and
+ * encoder/connector device objects exist.
+ */
+ if (!cdev->crtc_dev || !cdev->conn_dev)
+ continue;
+
+ attach_cnt++;
+
+ mutex_unlock(&drm_component_lock);
+
+ /*
+ * The fimd and dpi modules share the same device object, so
+ * add only the crtc device object in this case.
+ *
+ * TODO: if the dpi module is converted to the driver model,
+ * the code below can be removed.
+ */
+ if (cdev->crtc_dev == cdev->conn_dev) {
+ ret = component_master_add_child(m, compare_of,
+ cdev->crtc_dev);
+ if (ret < 0)
+ return ret;
+
+ goto out_lock;
+ }
+
+ /*
+ * Do not change the call order below: the crtc device must be
+ * added to the master first, because the connector/encoder
+ * needs the crtc's pipe number when it is created.
+ */
+ ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
+ ret |= component_master_add_child(m, compare_of,
+ cdev->conn_dev);
+ if (ret < 0)
+ return ret;
+
+out_lock:
+ mutex_lock(&drm_component_lock);
+ }
+
+ mutex_unlock(&drm_component_lock);
+
+ return attach_cnt ? 0 : -ENODEV;
+}
+
+static int exynos_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
+}
+
+static void exynos_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops exynos_drm_ops = {
+ .add_components = exynos_drm_add_components,
+ .bind = exynos_drm_bind,
+ .unbind = exynos_drm_unbind,
};
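
Each crtc or connector device pairs with this master through the component framework; reduced to its essentials, the handshake looks like this (the sub-device ops structure is hypothetical, the rest comes from this patch):

	/* Illustrative sketch only. */

	/* Sub driver side (compare exynos_dp_probe() above): */
	exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
				 display_type);
	component_add(dev, &foo_component_ops); /* hypothetical .bind/.unbind */

	/* Master side (exynos_drm_platform_probe() below): */
	component_master_add(&pdev->dev, &exynos_drm_ops);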
-static int __init exynos_drm_init(void)
+static int exynos_drm_platform_probe(struct platform_device *pdev)
{
int ret;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ ret = platform_driver_register(&fimd_driver);
+ if (ret < 0)
+ return ret;
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_DP
ret = platform_driver_register(&dp_driver);
if (ret < 0)
- goto out_dp;
+ goto err_unregister_fimd_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
ret = platform_driver_register(&dsi_driver);
if (ret < 0)
- goto out_dsi;
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- ret = platform_driver_register(&fimd_driver);
- if (ret < 0)
- goto out_fimd;
+ goto err_unregister_dp_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
- ret = platform_driver_register(&hdmi_driver);
- if (ret < 0)
- goto out_hdmi;
ret = platform_driver_register(&mixer_driver);
if (ret < 0)
- goto out_mixer;
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- ret = platform_driver_register(&vidi_driver);
+ goto err_unregister_dsi_drv;
+ ret = platform_driver_register(&hdmi_driver);
if (ret < 0)
- goto out_vidi;
+ goto err_unregister_mixer_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
ret = platform_driver_register(&g2d_driver);
if (ret < 0)
- goto out_g2d;
+ goto err_unregister_hdmi_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
ret = platform_driver_register(&fimc_driver);
if (ret < 0)
- goto out_fimc;
+ goto err_unregister_g2d_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
ret = platform_driver_register(&rotator_driver);
if (ret < 0)
- goto out_rotator;
+ goto err_unregister_fimc_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
ret = platform_driver_register(&gsc_driver);
if (ret < 0)
- goto out_gsc;
+ goto err_unregister_rotator_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_IPP
ret = platform_driver_register(&ipp_driver);
if (ret < 0)
- goto out_ipp;
+ goto err_unregister_gsc_drv;
ret = exynos_platform_device_ipp_register();
if (ret < 0)
- goto out_ipp_dev;
+ goto err_unregister_ipp_drv;
#endif
- ret = platform_driver_register(&exynos_drm_platform_driver);
+ ret = component_master_add(&pdev->dev, &exynos_drm_ops);
if (ret < 0)
- goto out_drm;
-
- exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
- NULL, 0);
- if (IS_ERR(exynos_drm_pdev)) {
- ret = PTR_ERR(exynos_drm_pdev);
- goto out;
- }
+ DRM_DEBUG_KMS("will be re-tried when the last sub driver has probed.\n");
return 0;
-out:
- platform_driver_unregister(&exynos_drm_platform_driver);
-
-out_drm:
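+/*
+ * Note the label placement below: a failed registration jumps past the
+ * unregister call of the driver that failed and then falls through the
+ * #ifdef blocks of all previously registered drivers, unregistering
+ * them in reverse order.
+ */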
#ifdef CONFIG_DRM_EXYNOS_IPP
- exynos_platform_device_ipp_unregister();
-out_ipp_dev:
+err_unregister_ipp_drv:
platform_driver_unregister(&ipp_driver);
-out_ipp:
+err_unregister_gsc_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
platform_driver_unregister(&gsc_driver);
-out_gsc:
+err_unregister_rotator_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
-out_rotator:
+err_unregister_fimc_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
platform_driver_unregister(&fimc_driver);
-out_fimc:
+err_unregister_g2d_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
-out_g2d:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- platform_driver_unregister(&vidi_driver);
-out_vidi:
+err_unregister_hdmi_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
- platform_driver_unregister(&mixer_driver);
-out_mixer:
platform_driver_unregister(&hdmi_driver);
-out_hdmi:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- platform_driver_unregister(&fimd_driver);
-out_fimd:
+err_unregister_mixer_drv:
+ platform_driver_unregister(&mixer_driver);
+err_unregister_dsi_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
platform_driver_unregister(&dsi_driver);
-out_dsi:
+err_unregister_dp_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
platform_driver_unregister(&dp_driver);
-out_dp:
+err_unregister_fimd_drv:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ platform_driver_unregister(&fimd_driver);
#endif
return ret;
}
-static void __exit exynos_drm_exit(void)
+static int exynos_drm_platform_remove(struct platform_device *pdev)
{
- platform_device_unregister(exynos_drm_pdev);
-
- platform_driver_unregister(&exynos_drm_platform_driver);
-
#ifdef CONFIG_DRM_EXYNOS_IPP
exynos_platform_device_ipp_unregister();
platform_driver_unregister(&ipp_driver);
@@ -616,10 +719,6 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&hdmi_driver);
#endif
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- platform_driver_unregister(&vidi_driver);
-#endif
-
#ifdef CONFIG_DRM_EXYNOS_FIMD
platform_driver_unregister(&fimd_driver);
#endif
@@ -631,6 +730,59 @@ static void __exit exynos_drm_exit(void)
#ifdef CONFIG_DRM_EXYNOS_DP
platform_driver_unregister(&dp_driver);
#endif
+ component_master_del(&pdev->dev, &exynos_drm_ops);
+ return 0;
+}
+
+static struct platform_driver exynos_drm_platform_driver = {
+ .probe = exynos_drm_platform_probe,
+ .remove = exynos_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos-drm",
+ .pm = &exynos_drm_pm_ops,
+ },
+};
+
+static int exynos_drm_init(void)
+{
+ int ret;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR(exynos_drm_pdev))
+ return PTR_ERR(exynos_drm_pdev);
+
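+ /*
+ * Set up the virtual display (vidi) before registering the platform
+ * driver, so the vidi device already exists when the master probes.
+ */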
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ ret = exynos_drm_probe_vidi();
+ if (ret < 0)
+ goto err_unregister_pd;
+#endif
+
+ ret = platform_driver_register(&exynos_drm_platform_driver);
+ if (ret)
+ goto err_remove_vidi;
+
+ return 0;
+
+err_remove_vidi:
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ exynos_drm_remove_vidi();
+
+err_unregister_pd:
+#endif
+ platform_device_unregister(exynos_drm_pdev);
+
+ return ret;
+}
+
+static void exynos_drm_exit(void)
+{
+ platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ exynos_drm_remove_vidi();
+#endif
+ platform_device_unregister(exynos_drm_pdev);
}
module_init(exynos_drm_init);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index ce3e6a30deaa..06cde4506278 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -42,6 +42,13 @@ struct drm_connector;
extern unsigned int drm_vblank_offdelay;
+/* This enumerates device type. */
+enum exynos_drm_device_type {
+ EXYNOS_DEVICE_TYPE_NONE,
+ EXYNOS_DEVICE_TYPE_CRTC,
+ EXYNOS_DEVICE_TYPE_CONNECTOR,
+};
+
/* this enumerates display type. */
enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_NONE,
@@ -122,7 +129,6 @@ struct exynos_drm_overlay {
* Exynos DRM Display Structure.
* - this structure is common to analog tv, digital tv and lcd panel.
*
- * @initialize: initializes the display with drm_dev
* @remove: cleans up the display for removal
* @mode_fixup: fix mode data comparing to hw specific display mode.
* @mode_set: convert drm_display_mode to hw specific display mode and
@@ -133,8 +139,6 @@ struct exynos_drm_overlay {
*/
struct exynos_drm_display;
struct exynos_drm_display_ops {
- int (*initialize)(struct exynos_drm_display *display,
- struct drm_device *drm_dev);
int (*create_connector)(struct exynos_drm_display *display,
struct drm_encoder *encoder);
void (*remove)(struct exynos_drm_display *display);
@@ -172,8 +176,6 @@ struct exynos_drm_display {
/*
* Exynos drm manager ops
*
- * @initialize: initializes the manager with drm_dev
- * @remove: cleans up the manager for removal
* @dpms: control device power.
* @mode_fixup: fix mode data before applying it
* @mode_set: set the given mode to the manager
@@ -189,9 +191,6 @@ struct exynos_drm_display {
*/
struct exynos_drm_manager;
struct exynos_drm_manager_ops {
- int (*initialize)(struct exynos_drm_manager *mgr,
- struct drm_device *drm_dev, int pipe);
- void (*remove)(struct exynos_drm_manager *mgr);
void (*dpms)(struct exynos_drm_manager *mgr, int mode);
bool (*mode_fixup)(struct exynos_drm_manager *mgr,
const struct drm_display_mode *mode,
@@ -215,6 +214,7 @@ struct exynos_drm_manager_ops {
* @list: the list entry for this manager
* @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
* @drm_dev: pointer to the drm device
+ * @crtc: crtc object.
* @pipe: the pipe number for this crtc/manager
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the manager's implementation specific context
@@ -223,6 +223,7 @@ struct exynos_drm_manager {
struct list_head list;
enum exynos_drm_output_type type;
struct drm_device *drm_dev;
+ struct drm_crtc *crtc;
int pipe;
struct exynos_drm_manager_ops *ops;
void *ctx;
@@ -254,6 +255,7 @@ struct drm_exynos_file_private {
* otherwise default one.
* @da_space_size: size of device address space.
* if 0 then default value is used for it.
+ * @pipe: the pipe number for this crtc/manager.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -271,6 +273,8 @@ struct exynos_drm_private {
unsigned long da_start;
unsigned long da_space_size;
+
+ unsigned int pipe;
};
/*
@@ -281,11 +285,11 @@ struct exynos_drm_private {
* @drm_dev: pointer to drm_device and this pointer would be set
* when sub driver calls exynos_drm_subdrv_register().
* @manager: subdrv has its own manager to control a hardware appropriately
- * and we can access a hardware drawing on this manager.
+ * and we can access a hardware drawing on this manager.
* @probe: this callback would be called by exynos drm driver after
- * subdrv is registered to it.
+ * subdrv is registered to it.
* @remove: this callback is used to release resources created
- * by probe callback.
+ * by probe callback.
* @open: this would be called with drm device file open.
* @close: this would be called with drm device file close.
*/
@@ -302,39 +306,14 @@ struct exynos_drm_subdrv {
struct drm_file *file);
};
-/*
- * this function calls a probe callback registered to sub driver list and
- * create its own encoder and connector and then set drm_device object
- * to global one.
- */
-int exynos_drm_device_register(struct drm_device *dev);
-/*
- * this function calls a remove callback registered to sub driver list and
- * destroy its own encoder and connetor.
- */
-int exynos_drm_device_unregister(struct drm_device *dev);
-
-int exynos_drm_initialize_managers(struct drm_device *dev);
-void exynos_drm_remove_managers(struct drm_device *dev);
-int exynos_drm_initialize_displays(struct drm_device *dev);
-void exynos_drm_remove_displays(struct drm_device *dev);
-
-int exynos_drm_manager_register(struct exynos_drm_manager *manager);
-int exynos_drm_manager_unregister(struct exynos_drm_manager *manager);
-int exynos_drm_display_register(struct exynos_drm_display *display);
-int exynos_drm_display_unregister(struct exynos_drm_display *display);
-
-/*
- * this function would be called by sub drivers such as display controller
- * or hdmi driver to register this sub driver object to exynos drm driver
- * and when a sub driver is registered to exynos drm driver a probe callback
- * of the sub driver is called and creates its own encoder and connector.
- */
+/* This function is called by non-KMS drivers such as g2d and ipp. */
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
/* this function removes subdrv list from exynos drm driver */
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+int exynos_drm_device_subdrv_probe(struct drm_device *dev);
+int exynos_drm_device_subdrv_remove(struct drm_device *dev);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
@@ -360,18 +339,40 @@ int exynos_platform_device_ipp_register(void);
void exynos_platform_device_ipp_unregister(void);
#ifdef CONFIG_DRM_EXYNOS_DPI
-int exynos_dpi_probe(struct device *dev);
+struct exynos_drm_display *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct device *dev);
#else
-static inline int exynos_dpi_probe(struct device *dev) { return 0; }
+static inline struct exynos_drm_display *
+exynos_dpi_probe(struct device *dev) { return NULL; }
static inline int exynos_dpi_remove(struct device *dev) { return 0; }
#endif
+/*
+ * this function registers exynos drm vidi platform device/driver.
+ */
+int exynos_drm_probe_vidi(void);
+
+/*
+ * this function unregisters the exynos drm vidi platform device/driver.
+ */
+void exynos_drm_remove_vidi(void);
+
+/* This function creates an encoder and a connector, and initializes them. */
+int exynos_drm_create_enc_conn(struct drm_device *dev,
+ struct exynos_drm_display *display);
+
+int exynos_drm_component_add(struct device *dev,
+ enum exynos_drm_device_type dev_type,
+ enum exynos_drm_output_type out_type);
+
+void exynos_drm_component_del(struct device *dev,
+ enum exynos_drm_device_type dev_type);
+
+extern struct platform_driver fimd_driver;
extern struct platform_driver dp_driver;
extern struct platform_driver dsi_driver;
-extern struct platform_driver fimd_driver;
-extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
+extern struct platform_driver hdmi_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 4ac438187568..6302aa64f6c1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/component.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@@ -1378,16 +1379,60 @@ end:
return ret;
}
+static int exynos_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm_dev = data;
+ struct exynos_dsi *dsi;
+ int ret;
+
+ ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
+ if (ret) {
+ DRM_ERROR("Encoder create [%d] failed with %d\n",
+ exynos_dsi_display.type, ret);
+ return ret;
+ }
+
+ dsi = exynos_dsi_display.ctx;
+
+ return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static void exynos_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+ struct drm_encoder *encoder = dsi->encoder;
+
+ exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&dsi->connector);
+}
+
+static const struct component_ops exynos_dsi_component_ops = {
+ .bind = exynos_dsi_bind,
+ .unbind = exynos_dsi_unbind,
+};
+
static int exynos_dsi_probe(struct platform_device *pdev)
{
struct resource *res;
struct exynos_dsi *dsi;
int ret;
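+ /*
+ * Register this device as a connector component up front; every
+ * failure path below must drop it again via err_del_component.
+ */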
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ exynos_dsi_display.type);
+ if (ret)
+ return ret;
+
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi) {
dev_err(&pdev->dev, "failed to allocate dsi object.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_del_component;
}
init_completion(&dsi->completed);
@@ -1401,7 +1446,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = exynos_dsi_parse_dt(dsi);
if (ret)
- return ret;
+ goto err_del_component;
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
@@ -1415,32 +1460,37 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
if (IS_ERR(dsi->pll_clk)) {
dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
- return -EPROBE_DEFER;
+ ret = PTR_ERR(dsi->pll_clk);
+ goto err_del_component;
}
dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(dsi->bus_clk)) {
dev_info(&pdev->dev, "failed to get dsi bus clock\n");
- return -EPROBE_DEFER;
+ ret = PTR_ERR(dsi->bus_clk);
+ goto err_del_component;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dsi->reg_base)) {
dev_err(&pdev->dev, "failed to remap io region\n");
- return PTR_ERR(dsi->reg_base);
+ ret = PTR_ERR(dsi->reg_base);
+ goto err_del_component;
}
dsi->phy = devm_phy_get(&pdev->dev, "dsim");
if (IS_ERR(dsi->phy)) {
dev_info(&pdev->dev, "failed to get dsim phy\n");
- return -EPROBE_DEFER;
+ ret = PTR_ERR(dsi->phy);
+ goto err_del_component;
}
dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
dev_err(&pdev->dev, "failed to request dsi irq resource\n");
- return dsi->irq;
+ ret = dsi->irq;
+ goto err_del_component;
}
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
@@ -1449,58 +1499,31 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request dsi irq\n");
- return ret;
+ goto err_del_component;
}
exynos_dsi_display.ctx = dsi;
platform_set_drvdata(pdev, &exynos_dsi_display);
- exynos_drm_display_register(&exynos_dsi_display);
-
- return mipi_dsi_host_register(&dsi->dsi_host);
-}
-
-static int exynos_dsi_remove(struct platform_device *pdev)
-{
- struct exynos_dsi *dsi = exynos_dsi_display.ctx;
-
- exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
-
- exynos_drm_display_unregister(&exynos_dsi_display);
- mipi_dsi_host_unregister(&dsi->dsi_host);
-
- return 0;
-}
-#if CONFIG_PM_SLEEP
-static int exynos_dsi_resume(struct device *dev)
-{
- struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+ ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
+ if (ret)
+ goto err_del_component;
- if (dsi->state & DSIM_STATE_ENABLED) {
- dsi->state &= ~DSIM_STATE_ENABLED;
- exynos_dsi_enable(dsi);
- }
+ return ret;
- return 0;
+err_del_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ return ret;
}
-static int exynos_dsi_suspend(struct device *dev)
+static int exynos_dsi_remove(struct platform_device *pdev)
{
- struct exynos_dsi *dsi = exynos_dsi_display.ctx;
-
- if (dsi->state & DSIM_STATE_ENABLED) {
- exynos_dsi_disable(dsi);
- dsi->state |= DSIM_STATE_ENABLED;
- }
+ component_del(&pdev->dev, &exynos_dsi_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
-#endif
-
-static const struct dev_pm_ops exynos_dsi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume)
-};
static struct of_device_id exynos_dsi_of_match[] = {
{ .compatible = "samsung,exynos4210-mipi-dsi" },
@@ -1513,7 +1536,6 @@ struct platform_driver dsi_driver = {
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
- .pm = &exynos_dsi_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index addbf7536da4..d771b467cf0c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -121,16 +121,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
- dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- if (is_drm_iommu_supported(dev))
- fbi->fix.smem_start = (unsigned long)
- (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
- else
- fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
-
fbi->screen_size = size;
- fbi->fix.smem_len = size;
return 0;
}
@@ -237,7 +229,7 @@ static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
-bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
+static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
{
struct drm_connector *connector;
bool ret = false;
@@ -375,7 +367,5 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
if (!private || !private->fb_helper)
return;
- drm_modeset_lock_all(dev);
- drm_fb_helper_restore_fbdev_mode(private->fb_helper);
- drm_modeset_unlock_all(dev);
+ drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 30d76b2ff9c2..831dde9034c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/spinlock.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -57,7 +58,6 @@
#define FIMC_SHFACTOR 10
#define FIMC_BUF_STOP 1
#define FIMC_BUF_START 2
-#define FIMC_REG_SZ 32
#define FIMC_WIDTH_ITU_709 1280
#define FIMC_REFRESH_MAX 60
#define FIMC_REFRESH_MIN 12
@@ -69,9 +69,6 @@
#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
struct fimc_context, ippdrv);
-#define fimc_read(offset) readl(ctx->regs + (offset))
-#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
-
enum fimc_wb {
FIMC_WB_NONE,
FIMC_WB_A,
@@ -161,7 +158,7 @@ struct fimc_context {
struct exynos_drm_ippdrv ippdrv;
struct resource *regs_res;
void __iomem *regs;
- struct mutex lock;
+ spinlock_t lock;
struct clk *clocks[FIMC_CLKS_MAX];
u32 clk_frequency;
struct regmap *sysreg;
@@ -172,39 +169,53 @@ struct fimc_context {
bool suspended;
};
+static u32 fimc_read(struct fimc_context *ctx, u32 reg)
+{
+ return readl(ctx->regs + reg);
+}
+
+static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg)
+{
+ writel(val, ctx->regs + reg);
+}
+
+static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
+{
+ void __iomem *r = ctx->regs + reg;
+
+ writel(readl(r) | bits, r);
+}
+
+static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits)
+{
+ void __iomem *r = ctx->regs + reg;
+
+ writel(readl(r) & ~bits, r);
+}
+
static void fimc_sw_reset(struct fimc_context *ctx)
{
u32 cfg;
/* stop dma operation */
- cfg = fimc_read(EXYNOS_CISTATUS);
- if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
- cfg = fimc_read(EXYNOS_MSCTRL);
- cfg &= ~EXYNOS_MSCTRL_ENVID;
- fimc_write(cfg, EXYNOS_MSCTRL);
- }
+ cfg = fimc_read(ctx, EXYNOS_CISTATUS);
+ if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg))
+ fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
- cfg = fimc_read(EXYNOS_CISRCFMT);
- cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
- fimc_write(cfg, EXYNOS_CISRCFMT);
+ fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT);
/* disable image capture */
- cfg = fimc_read(EXYNOS_CIIMGCPT);
- cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
- fimc_write(cfg, EXYNOS_CIIMGCPT);
+ fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
+ EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
/* s/w reset */
- cfg = fimc_read(EXYNOS_CIGCTRL);
- cfg |= (EXYNOS_CIGCTRL_SWRST);
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
/* s/w reset complete */
- cfg = fimc_read(EXYNOS_CIGCTRL);
- cfg &= ~EXYNOS_CIGCTRL_SWRST;
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
/* reset sequence */
- fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+ fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
}
static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
@@ -220,7 +231,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
DRM_DEBUG_KMS("wb[%d]\n", wb);
- cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
@@ -246,7 +257,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
break;
}
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_set_polarity(struct fimc_context *ctx,
@@ -259,7 +270,7 @@ static void fimc_set_polarity(struct fimc_context *ctx,
DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
pol->inv_href, pol->inv_hsync);
- cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
@@ -272,7 +283,7 @@ static void fimc_set_polarity(struct fimc_context *ctx,
if (pol->inv_hsync)
cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
@@ -281,70 +292,54 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
DRM_DEBUG_KMS("enable[%d]\n", enable);
- cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable)
cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
else
cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
-static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
- bool overflow, bool level)
+static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
- enable, overflow, level);
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
- cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable) {
- cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
- cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
- if (overflow)
- cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
- if (level)
- cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ cfg &= ~EXYNOS_CIGCTRL_IRQ_OVFEN;
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE | EXYNOS_CIGCTRL_IRQ_LEVEL;
} else
- cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
-
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_IRQ_ENABLE;
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_clear_irq(struct fimc_context *ctx)
{
- u32 cfg;
-
- cfg = fimc_read(EXYNOS_CIGCTRL);
- cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR);
}
static bool fimc_check_ovf(struct fimc_context *ctx)
{
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
- u32 cfg, status, flag;
+ u32 status, flag;
- status = fimc_read(EXYNOS_CISTATUS);
+ status = fimc_read(ctx, EXYNOS_CISTATUS);
flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
EXYNOS_CISTATUS_OVFICR;
DRM_DEBUG_KMS("flag[0x%x]\n", flag);
if (status & flag) {
- cfg = fimc_read(EXYNOS_CIWDOFST);
- cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ fimc_set_bits(ctx, EXYNOS_CIWDOFST,
+ EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
-
- fimc_write(cfg, EXYNOS_CIWDOFST);
-
- cfg = fimc_read(EXYNOS_CIWDOFST);
- cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
+ EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
- fimc_write(cfg, EXYNOS_CIWDOFST);
-
dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
return true;
@@ -357,7 +352,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
{
u32 cfg;
- cfg = fimc_read(EXYNOS_CISTATUS);
+ cfg = fimc_read(ctx, EXYNOS_CISTATUS);
DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
@@ -365,7 +360,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
return false;
cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
- fimc_write(cfg, EXYNOS_CISTATUS);
+ fimc_write(ctx, cfg, EXYNOS_CISTATUS);
return true;
}
@@ -375,7 +370,7 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
u32 cfg;
int frame_cnt, buf_id;
- cfg = fimc_read(EXYNOS_CISTATUS2);
+ cfg = fimc_read(ctx, EXYNOS_CISTATUS2);
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
if (frame_cnt == 0)
@@ -402,13 +397,13 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
DRM_DEBUG_KMS("enable[%d]\n", enable);
- cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
if (enable)
cfg |= EXYNOS_CIOCTRL_LASTENDEN;
else
cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
- fimc_write(cfg, EXYNOS_CIOCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
}
@@ -420,18 +415,18 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
/* RGB */
- cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
default:
/* bypass */
@@ -439,7 +434,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
}
/* YUV */
- cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
EXYNOS_MSCTRL_C_INT_IN_2PLANE |
EXYNOS_MSCTRL_ORDER422_YCBYCR);
@@ -479,7 +474,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
return -EINVAL;
}
- fimc_write(cfg, EXYNOS_MSCTRL);
+ fimc_write(ctx, cfg, EXYNOS_MSCTRL);
return 0;
}
@@ -492,7 +487,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
- cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
switch (fmt) {
@@ -527,9 +522,9 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
return -EINVAL;
}
- fimc_write(cfg, EXYNOS_MSCTRL);
+ fimc_write(ctx, cfg, EXYNOS_MSCTRL);
- cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
if (fmt == DRM_FORMAT_NV12MT)
@@ -537,7 +532,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
else
cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
- fimc_write(cfg, EXYNOS_CIDMAPARAM);
+ fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
return fimc_src_set_fmt_order(ctx, fmt);
}
@@ -552,11 +547,11 @@ static int fimc_src_set_transf(struct device *dev,
DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
- cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
EXYNOS_MSCTRL_FLIP_Y_MIRROR);
- cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
switch (degree) {
@@ -595,8 +590,8 @@ static int fimc_src_set_transf(struct device *dev,
return -EINVAL;
}
- fimc_write(cfg1, EXYNOS_MSCTRL);
- fimc_write(cfg2, EXYNOS_CITRGFMT);
+ fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
+ fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
return 0;
@@ -621,17 +616,17 @@ static int fimc_set_window(struct fimc_context *ctx,
* set window offset 1, 2 size
* check figure 43-21 in user manual
*/
- cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg = fimc_read(ctx, EXYNOS_CIWDOFST);
cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
EXYNOS_CIWDOFST_WINVEROFST_MASK);
cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
EXYNOS_CIWDOFST_WINVEROFST(v1));
cfg |= EXYNOS_CIWDOFST_WINOFSEN;
- fimc_write(cfg, EXYNOS_CIWDOFST);
+ fimc_write(ctx, cfg, EXYNOS_CIWDOFST);
cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
EXYNOS_CIWDOFST2_WINVEROFST2(v2));
- fimc_write(cfg, EXYNOS_CIWDOFST2);
+ fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
return 0;
}
@@ -651,7 +646,7 @@ static int fimc_src_set_size(struct device *dev, int swap,
cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
- fimc_write(cfg, EXYNOS_ORGISIZE);
+ fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
@@ -663,12 +658,12 @@ static int fimc_src_set_size(struct device *dev, int swap,
}
/* set input DMA image size */
- cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
- fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+ fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
/*
* set input FIFO image size
@@ -677,18 +672,18 @@ static int fimc_src_set_size(struct device *dev, int swap,
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
- fimc_write(cfg, EXYNOS_CISRCFMT);
+ fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
/* offset Y(RGB), Cb, Cr */
cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
- fimc_write(cfg, EXYNOS_CIIYOFF);
+ fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
- fimc_write(cfg, EXYNOS_CIICBOFF);
+ fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIICROFF_VERTICAL(img_pos.y));
- fimc_write(cfg, EXYNOS_CIICROFF);
+ fimc_write(ctx, cfg, EXYNOS_CIICROFF);
return fimc_set_window(ctx, &img_pos, &img_sz);
}
@@ -722,25 +717,25 @@ static int fimc_src_set_addr(struct device *dev,
switch (buf_type) {
case IPP_BUF_ENQUEUE:
config = &property->config[EXYNOS_DRM_OPS_SRC];
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
EXYNOS_CIIYSA(buf_id));
if (config->fmt == DRM_FORMAT_YVU420) {
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICBSA(buf_id));
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICRSA(buf_id));
} else {
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICBSA(buf_id));
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICRSA(buf_id));
}
break;
case IPP_BUF_DEQUEUE:
- fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
- fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
- fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
break;
default:
/* bypass */
@@ -765,22 +760,22 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
/* RGB */
- cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
case DRM_FORMAT_RGB888:
cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
case DRM_FORMAT_XRGB8888:
cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
break;
default:
/* bypass */
@@ -788,7 +783,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
}
/* YUV */
- cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
EXYNOS_CIOCTRL_ORDER422_MASK |
EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
@@ -830,7 +825,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
return -EINVAL;
}
- fimc_write(cfg, EXYNOS_CIOCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
return 0;
}
@@ -843,16 +838,16 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
- cfg = fimc_read(EXYNOS_CIEXTEN);
+ cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
if (fmt == DRM_FORMAT_AYUV) {
cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
- fimc_write(cfg, EXYNOS_CIEXTEN);
+ fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
} else {
cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
- fimc_write(cfg, EXYNOS_CIEXTEN);
+ fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
- cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
switch (fmt) {
@@ -885,10 +880,10 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
return -EINVAL;
}
- fimc_write(cfg, EXYNOS_CITRGFMT);
+ fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
}
- cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
if (fmt == DRM_FORMAT_NV12MT)
@@ -896,7 +891,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
else
cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
- fimc_write(cfg, EXYNOS_CIDMAPARAM);
+ fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
return fimc_dst_set_fmt_order(ctx, fmt);
}
@@ -911,7 +906,7 @@ static int fimc_dst_set_transf(struct device *dev,
DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
- cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
@@ -951,53 +946,23 @@ static int fimc_dst_set_transf(struct device *dev,
return -EINVAL;
}
- fimc_write(cfg, EXYNOS_CITRGFMT);
+ fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
return 0;
}
-static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
-{
- DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
-
- if (src >= dst * 64) {
- DRM_ERROR("failed to make ratio and shift.\n");
- return -EINVAL;
- } else if (src >= dst * 32) {
- *ratio = 32;
- *shift = 5;
- } else if (src >= dst * 16) {
- *ratio = 16;
- *shift = 4;
- } else if (src >= dst * 8) {
- *ratio = 8;
- *shift = 3;
- } else if (src >= dst * 4) {
- *ratio = 4;
- *shift = 2;
- } else if (src >= dst * 2) {
- *ratio = 2;
- *shift = 1;
- } else {
- *ratio = 1;
- *shift = 0;
- }
-
- return 0;
-}
-
static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
{
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
u32 cfg, cfg_ext, shfactor;
u32 pre_dst_width, pre_dst_height;
- u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ u32 hfactor, vfactor;
int ret = 0;
u32 src_w, src_h, dst_w, dst_h;
- cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
src_w = src->h;
src_h = src->w;
@@ -1014,24 +979,24 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
dst_h = dst->h;
}
- ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
- if (ret) {
+ /* fimc_ippdrv_check_property ensures that the dividers are non-zero */
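+ /*
+ * fls() returns the index of the highest set bit, so hfactor ends up
+ * as the log2 of the prescale ratio (2^hfactor <= src_w/dst_w).
+ */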
+ hfactor = fls(src_w / dst_w / 2);
+ if (hfactor > FIMC_SHFACTOR / 2) {
dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
- return ret;
+ return -EINVAL;
}
- ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
- if (ret) {
+ vfactor = fls(src_h / dst_h / 2);
+ if (vfactor > FIMC_SHFACTOR / 2) {
dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
- return ret;
+ return -EINVAL;
}
- pre_dst_width = src_w / pre_hratio;
- pre_dst_height = src_h / pre_vratio;
+ pre_dst_width = src_w >> hfactor;
+ pre_dst_height = src_h >> vfactor;
DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
pre_dst_width, pre_dst_height);
- DRM_DEBUG_KMS("pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
- pre_hratio, hfactor, pre_vratio, vfactor);
+ DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
@@ -1044,13 +1009,13 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
- EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
- EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
- fimc_write(cfg, EXYNOS_CISCPRERATIO);
+ EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(1 << vfactor));
+ fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO);
cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
- fimc_write(cfg, EXYNOS_CISCPREDST);
+ fimc_write(ctx, cfg, EXYNOS_CISCPREDST);
return ret;
}
@@ -1064,7 +1029,7 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
sc->hratio, sc->vratio);
- cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
@@ -1084,14 +1049,14 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
- cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN);
cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
- fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+ fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
}
static int fimc_dst_set_size(struct device *dev, int swap,
@@ -1109,12 +1074,12 @@ static int fimc_dst_set_size(struct device *dev, int swap,
cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
- fimc_write(cfg, EXYNOS_ORGOSIZE);
+ fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
/* CSC ITU */
- cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
if (sz->hsize >= FIMC_WIDTH_ITU_709)
@@ -1122,7 +1087,7 @@ static int fimc_dst_set_size(struct device *dev, int swap,
else
cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
if (swap) {
img_pos.w = pos->h;
@@ -1132,41 +1097,38 @@ static int fimc_dst_set_size(struct device *dev, int swap,
}
/* target image size */
- cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
EXYNOS_CITRGFMT_TARGETV_MASK);
cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
- fimc_write(cfg, EXYNOS_CITRGFMT);
+ fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
/* target area */
cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
- fimc_write(cfg, EXYNOS_CITAREA);
+ fimc_write(ctx, cfg, EXYNOS_CITAREA);
/* offset Y(RGB), Cb, Cr */
cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
- fimc_write(cfg, EXYNOS_CIOYOFF);
+ fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
- fimc_write(cfg, EXYNOS_CIOCBOFF);
+ fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
- fimc_write(cfg, EXYNOS_CIOCROFF);
+ fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
return 0;
}
-static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+static int fimc_dst_get_buf_count(struct fimc_context *ctx)
{
- u32 cfg, i, buf_num = 0;
- u32 mask = 0x00000001;
+ u32 cfg, buf_num;
- cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+ cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
- for (i = 0; i < FIMC_REG_SZ; i++)
- if (cfg & (mask << i))
- buf_num++;
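+ /* Each set bit in CIFCNTSEQ enables one buffer, so the population
+ * count of the register is the number of enabled buffers. */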
+ buf_num = hweight32(cfg);
DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
@@ -1181,13 +1143,14 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 cfg;
u32 mask = 0x00000001 << buf_id;
int ret = 0;
+ unsigned long flags;
DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
- mutex_lock(&ctx->lock);
+ spin_lock_irqsave(&ctx->lock, flags);
/* mask register set */
- cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+ cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
switch (buf_type) {
case IPP_BUF_ENQUEUE:
@@ -1205,20 +1168,20 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
/* sequence id */
cfg &= ~mask;
cfg |= (enable << buf_id);
- fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+ fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
/* interrupt enable */
if (buf_type == IPP_BUF_ENQUEUE &&
- fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
- fimc_handle_irq(ctx, true, false, true);
+ fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
+ fimc_mask_irq(ctx, true);
/* interrupt disable */
if (buf_type == IPP_BUF_DEQUEUE &&
- fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
- fimc_handle_irq(ctx, false, false, true);
+ fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
+ fimc_mask_irq(ctx, false);
err_unlock:
- mutex_unlock(&ctx->lock);
+ spin_unlock_irqrestore(&ctx->lock, flags);
return ret;
}
@@ -1252,25 +1215,25 @@ static int fimc_dst_set_addr(struct device *dev,
case IPP_BUF_ENQUEUE:
config = &property->config[EXYNOS_DRM_OPS_DST];
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
EXYNOS_CIOYSA(buf_id));
if (config->fmt == DRM_FORMAT_YVU420) {
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIOCBSA(buf_id));
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIOCRSA(buf_id));
} else {
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIOCBSA(buf_id));
- fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIOCRSA(buf_id));
}
break;
case IPP_BUF_DEQUEUE:
- fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
- fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
- fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id));
break;
default:
/* bypass */
@@ -1342,11 +1305,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
- struct drm_exynos_ipp_prop_list *prop_list;
-
- prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list)
- return -ENOMEM;
+ struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->writeback = 1;
@@ -1371,8 +1330,6 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
prop_list->scale_min.hsize = FIMC_SCALE_MIN;
prop_list->scale_min.vsize = FIMC_SCALE_MIN;
- ippdrv->prop_list = prop_list;
-
return 0;
}
@@ -1395,7 +1352,7 @@ static int fimc_ippdrv_check_property(struct device *dev,
{
struct fimc_context *ctx = get_fimc_context(dev);
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
- struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
struct drm_exynos_ipp_config *config;
struct drm_exynos_pos *pos;
struct drm_exynos_sz *sz;
@@ -1508,15 +1465,15 @@ static void fimc_clear_addr(struct fimc_context *ctx)
int i;
for (i = 0; i < FIMC_MAX_SRC; i++) {
- fimc_write(0, EXYNOS_CIIYSA(i));
- fimc_write(0, EXYNOS_CIICBSA(i));
- fimc_write(0, EXYNOS_CIICRSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIIYSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIICBSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIICRSA(i));
}
for (i = 0; i < FIMC_MAX_DST; i++) {
- fimc_write(0, EXYNOS_CIOYSA(i));
- fimc_write(0, EXYNOS_CIOCBSA(i));
- fimc_write(0, EXYNOS_CIOCRSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIOYSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIOCBSA(i));
+ fimc_write(ctx, 0, EXYNOS_CIOCRSA(i));
}
}
@@ -1556,7 +1513,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
property = &c_node->property;
- fimc_handle_irq(ctx, true, false, true);
+ fimc_mask_irq(ctx, true);
for_each_ipp_ops(i) {
config = &property->config[i];
@@ -1582,10 +1539,10 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_handle_lastend(ctx, false);
/* setup dma */
- cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
- fimc_write(cfg0, EXYNOS_MSCTRL);
+ fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
break;
case IPP_CMD_WB:
fimc_set_type_ctrl(ctx, FIMC_WB_A);
@@ -1610,41 +1567,33 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
}
/* Reset status */
- fimc_write(0x0, EXYNOS_CISTATUS);
+ fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
- cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT);
cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
/* Scaler */
- cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
EXYNOS_CISCCTRL_SCALERSTART);
- fimc_write(cfg1, EXYNOS_CISCCTRL);
+ fimc_write(ctx, cfg1, EXYNOS_CISCCTRL);
/* Enable image capture*/
cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
- fimc_write(cfg0, EXYNOS_CIIMGCPT);
+ fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT);
/* Disable frame end irq */
- cfg0 = fimc_read(EXYNOS_CIGCTRL);
- cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
- fimc_write(cfg0, EXYNOS_CIGCTRL);
+ fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
- cfg0 = fimc_read(EXYNOS_CIOCTRL);
- cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
- fimc_write(cfg0, EXYNOS_CIOCTRL);
+ fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
if (cmd == IPP_CMD_M2M) {
- cfg0 = fimc_read(EXYNOS_MSCTRL);
- cfg0 |= EXYNOS_MSCTRL_ENVID;
- fimc_write(cfg0, EXYNOS_MSCTRL);
+ fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
- cfg0 = fimc_read(EXYNOS_MSCTRL);
- cfg0 |= EXYNOS_MSCTRL_ENVID;
- fimc_write(cfg0, EXYNOS_MSCTRL);
+ fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
}
return 0;
@@ -1661,10 +1610,10 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
switch (cmd) {
case IPP_CMD_M2M:
/* Source clear */
- cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
cfg &= ~EXYNOS_MSCTRL_ENVID;
- fimc_write(cfg, EXYNOS_MSCTRL);
+ fimc_write(ctx, cfg, EXYNOS_MSCTRL);
break;
case IPP_CMD_WB:
exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
@@ -1675,25 +1624,20 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
break;
}
- fimc_handle_irq(ctx, false, false, true);
+ fimc_mask_irq(ctx, false);
/* reset sequence */
- fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+ fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
/* Scaler disable */
- cfg = fimc_read(EXYNOS_CISCCTRL);
- cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
- fimc_write(cfg, EXYNOS_CISCCTRL);
+ fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART);
/* Disable image capture */
- cfg = fimc_read(EXYNOS_CIIMGCPT);
- cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
- fimc_write(cfg, EXYNOS_CIIMGCPT);
+ fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
+ EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
/* Enable frame end irq */
- cfg = fimc_read(EXYNOS_CIGCTRL);
- cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
- fimc_write(cfg, EXYNOS_CIGCTRL);
+ fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
}
static void fimc_put_clocks(struct fimc_context *ctx)
@@ -1848,7 +1792,7 @@ static int fimc_probe(struct platform_device *pdev)
DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
- mutex_init(&ctx->lock);
+ spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
pm_runtime_set_active(dev);
@@ -1879,7 +1823,6 @@ static int fimc_remove(struct platform_device *pdev)
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
exynos_drm_ippdrv_unregister(ippdrv);
- mutex_destroy(&ctx->lock);
fimc_put_clocks(ctx);
pm_runtime_set_suspended(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 40fd6ccfcd6f..33161ad38201 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/component.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -38,6 +39,7 @@
*/
#define FIMD_DEFAULT_FRAMERATE 60
+#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware window 0, 2 ~ 4.*/
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
@@ -122,6 +124,7 @@ struct fimd_context {
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
+ struct exynos_drm_display *display;
};
static const struct of_device_id fimd_driver_dt_match[] = {
@@ -143,13 +146,57 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
return (struct fimd_driver_data *)of_id->data;
}
+static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal VSYNC interrupt or return after
+ * timeout which is set to 50ms (refresh rate of 20).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+
+static void fimd_clear_channel(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+ int win, ch_enabled = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* Check if any channel is enabled. */
+ for (win = 0; win < WINDOWS_NR; win++) {
+ u32 val = readl(ctx->regs + SHADOWCON);
+ if (val & SHADOWCON_CHx_ENABLE(win)) {
+ val &= ~SHADOWCON_CHx_ENABLE(win);
+ writel(val, ctx->regs + SHADOWCON);
+ ch_enabled = 1;
+ }
+ }
+
+ /* Wait for vsync, as disabling a channel only takes effect at the next vsync */
+ if (ch_enabled)
+ fimd_wait_for_vblank(mgr);
+}
+
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
- struct drm_device *drm_dev, int pipe)
+ struct drm_device *drm_dev)
{
struct fimd_context *ctx = mgr->ctx;
+ struct exynos_drm_private *priv;
+ priv = drm_dev->dev_private;
- ctx->drm_dev = drm_dev;
- ctx->pipe = pipe;
+ mgr->drm_dev = ctx->drm_dev = drm_dev;
+ mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
@@ -169,8 +216,14 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
+ if (is_drm_iommu_supported(ctx->drm_dev)) {
+ /*
+ * If any channel is already active, the iommu will throw
+ * a PAGE FAULT when it is enabled, so disable any active
+ * channel first.
+ */
+ fimd_clear_channel(mgr);
drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
+ }
return 0;
}
@@ -324,25 +377,6 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
}
}
-static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
-{
- struct fimd_context *ctx = mgr->ctx;
-
- if (ctx->suspended)
- return;
-
- atomic_set(&ctx->wait_vsync_event, 1);
-
- /*
- * wait for FIMD to signal VSYNC interrupt or return after
- * timeout which is set to 50ms (refresh rate of 20).
- */
- if (!wait_event_timeout(ctx->wait_vsync_queue,
- !atomic_read(&ctx->wait_vsync_event),
- HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
@@ -446,6 +480,19 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ /*
+ * On exynos, setting the dma-burst to 16Word causes permanent
+ * tearing for very small buffers, e.g. the cursor buffer. Burst
+ * mode switching based on the overlay size is not recommended, as
+ * the overlay size varies a lot towards the end of the screen and
+ * rapid movement causes unstable DMA, which results in an iommu
+ * crash/tear.
+ */
+
+ if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ val &= ~WINCONx_BURSTLEN_MASK;
+ val |= WINCONx_BURSTLEN_4WORD;
+ }
+
writel(val, ctx->regs + WINCON(win));
}
@@ -656,19 +703,6 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
win_data->enabled = false;
}
-static void fimd_clear_win(struct fimd_context *ctx, int win)
-{
- writel(0, ctx->regs + WINCON(win));
- writel(0, ctx->regs + VIDOSD_A(win));
- writel(0, ctx->regs + VIDOSD_B(win));
- writel(0, ctx->regs + VIDOSD_C(win));
-
- if (win == 1 || win == 2)
- writel(0, ctx->regs + VIDOSD_D(win));
-
- fimd_shadow_protect_win(ctx, win, false);
-}
-
static void fimd_window_suspend(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
@@ -707,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
win_data = &ctx->win_data[i];
if (win_data->enabled)
fimd_win_commit(mgr, i);
+ else
+ fimd_win_disable(mgr, i);
}
fimd_commit(mgr);
@@ -803,8 +839,6 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
- .initialize = fimd_mgr_initialize,
- .remove = fimd_mgr_remove,
.dpms = fimd_dpms,
.mode_fixup = fimd_mode_fixup,
.mode_set = fimd_mode_set,
@@ -849,20 +883,64 @@ out:
return IRQ_HANDLED;
}
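+/*
+ * Master bind callback: create the crtc for this manager and, when a
+ * DPI panel was found at probe time, its encoder and connector too.
+ */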
+static int fimd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct fimd_context *ctx = fimd_manager.ctx;
+ struct drm_device *drm_dev = data;
+
+ fimd_mgr_initialize(&fimd_manager, drm_dev);
+ exynos_drm_crtc_create(&fimd_manager);
+ if (ctx->display)
+ exynos_drm_create_enc_conn(drm_dev, ctx->display);
+
+ return 0;
+}
+
+static void fimd_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+ struct fimd_context *ctx = fimd_manager.ctx;
+ struct drm_crtc *crtc = mgr->crtc;
+
+ fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
+
+ if (ctx->display)
+ exynos_dpi_remove(dev);
+
+ fimd_mgr_remove(mgr);
+
+ crtc->funcs->destroy(crtc);
+}
+
+static const struct component_ops fimd_component_ops = {
+ .bind = fimd_bind,
+ .unbind = fimd_unbind,
+};
+
static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct resource *res;
- int win;
int ret = -EINVAL;
- if (!dev->of_node)
- return -ENODEV;
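+ /*
+ * Register the crtc component first; every error path below must
+ * undo this via err_del_component.
+ */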
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+ fimd_manager.type);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node) {
+ ret = -ENODEV;
+ goto err_del_component;
+ }
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err_del_component;
+ }
ctx->dev = dev;
ctx->suspended = true;
@@ -875,32 +953,37 @@ static int fimd_probe(struct platform_device *pdev)
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(ctx->bus_clk);
+ ret = PTR_ERR(ctx->bus_clk);
+ goto err_del_component;
}
ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- return PTR_ERR(ctx->lcd_clk);
+ ret = PTR_ERR(ctx->lcd_clk);
+ goto err_del_component;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(ctx->regs))
- return PTR_ERR(ctx->regs);
+ if (IS_ERR(ctx->regs)) {
+ ret = PTR_ERR(ctx->regs);
+ goto err_del_component;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
if (!res) {
		dev_err(dev, "failed to find vsync irq resource.\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_del_component;
}
ret = devm_request_irq(dev, res->start, fimd_irq_handler,
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- return ret;
+ goto err_del_component;
}
ctx->driver_data = drm_fimd_get_driver_data(pdev);
@@ -910,30 +993,34 @@ static int fimd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &fimd_manager);
fimd_manager.ctx = ctx;
- exynos_drm_manager_register(&fimd_manager);
- exynos_dpi_probe(ctx->dev);
+ ctx->display = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->display))
+ return PTR_ERR(ctx->display);
- pm_runtime_enable(dev);
+ pm_runtime_enable(&pdev->dev);
- for (win = 0; win < WINDOWS_NR; win++)
- fimd_clear_win(ctx, win);
+ ret = component_add(&pdev->dev, &fimd_component_ops);
+ if (ret)
+ goto err_disable_pm_runtime;
- return 0;
+ return ret;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+err_del_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+ return ret;
}
static int fimd_remove(struct platform_device *pdev)
{
- struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
-
- exynos_dpi_remove(&pdev->dev);
-
- exynos_drm_manager_unregister(&fimd_manager);
-
- fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
-
pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &fimd_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 42d2904d88c7..163a054922cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -612,22 +612,20 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
- EXYNOS_BO_WC, args->size);
- /*
- * If physically contiguous memory allocation fails and if IOMMU is
- * supported then try to get buffer from non physically contiguous
- * memory area.
- */
- if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
- dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+ if (is_drm_iommu_supported(dev)) {
+ exynos_gem_obj = exynos_drm_gem_create(dev,
+ EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+ args->size);
+ } else {
exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
- args->size);
+ EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
+ args->size);
}
- if (IS_ERR(exynos_gem_obj))
+ if (IS_ERR(exynos_gem_obj)) {
+ dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem_obj);
+ }
ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
&args->handle);
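/*
 * Editor's worked example (illustration only): the pitch computation
 * above rounds bpp up to whole bytes per pixel. For a 1920x1080
 * XRGB8888 dumb buffer:
 *
 *	pitch = 1920 * ((32 + 7) / 8) = 1920 * 4 = 7680 bytes
 *	size  = 7680 * 1080 = 8294400 bytes (~7.9 MiB)
 */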
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index fa75059a6104..9e3ff1672965 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1335,11 +1335,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
- struct drm_exynos_ipp_prop_list *prop_list;
-
- prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list)
- return -ENOMEM;
+ struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->writeback = 1;
@@ -1363,8 +1359,6 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
prop_list->scale_min.hsize = GSC_SCALE_MIN;
prop_list->scale_min.vsize = GSC_SCALE_MIN;
- ippdrv->prop_list = prop_list;
-
return 0;
}
@@ -1387,7 +1381,7 @@ static int gsc_ippdrv_check_property(struct device *dev,
{
struct gsc_context *ctx = get_gsc_context(dev);
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
- struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
struct drm_exynos_ipp_config *config;
struct drm_exynos_pos *pos;
struct drm_exynos_sz *sz;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 3d78144387ac..a1888e128f1d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -167,6 +167,13 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
return 0;
}
+static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ mutex_lock(lock);
+ idr_remove(id_idr, id);
+ mutex_unlock(lock);
+}
+
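/*
 * Editor's sketch: ipp_create_id() and the new ipp_remove_id() pair up
 * around the same idr and mutex, so any error path that allocated an id
 * must release it under the lock. A hypothetical caller (the
 * example_setup() step is made up for illustration):
 */
static int example_register(struct ipp_context *ctx, void *obj)
{
	u32 id;
	int ret;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, obj, &id);
	if (ret)
		return ret;

	ret = example_setup(obj);	/* hypothetical follow-up step */
	if (ret)
		ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, id);
	return ret;
}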
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
void *obj;
@@ -276,11 +283,6 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
- if (list_empty(&exynos_drm_ippdrv_list)) {
- DRM_DEBUG_KMS("ippdrv_list is empty.\n");
- return ERR_PTR(-ENODEV);
- }
-
/*
	 * This case searches for the ipp driver by the prop_id handle.
	 * Sometimes the ipp subsystem finds the driver by prop_id.
@@ -289,11 +291,14 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
- if (!list_empty(&ippdrv->cmd_list)) {
- list_for_each_entry(c_node, &ippdrv->cmd_list, list)
- if (c_node->property.prop_id == prop_id)
- return ippdrv;
+ mutex_lock(&ippdrv->cmd_lock);
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if (c_node->property.prop_id == prop_id) {
+ mutex_unlock(&ippdrv->cmd_lock);
+ return ippdrv;
+ }
}
+ mutex_unlock(&ippdrv->cmd_lock);
}
return ERR_PTR(-ENODEV);
@@ -325,6 +330,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
if (!prop_list->ipp_id) {
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
count++;
+
/*
		 * Supply the ippdrv list count to the user application.
		 * As a first step, the user application gets the ippdrv count.
@@ -346,7 +352,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
return PTR_ERR(ippdrv);
}
- prop_list = ippdrv->prop_list;
+ *prop_list = ippdrv->prop_list;
}
return 0;
@@ -386,9 +392,11 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
	 * when we find the command node that matches this prop_id and is
	 * stopped, return the property information set in that command node.
*/
+ mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
if ((c_node->property.prop_id == prop_id) &&
(c_node->state == IPP_STATE_STOP)) {
+ mutex_unlock(&ippdrv->cmd_lock);
DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
property->cmd, (int)ippdrv);
@@ -396,6 +404,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
return 0;
}
}
+ mutex_unlock(&ippdrv->cmd_lock);
DRM_ERROR("failed to search property.\n");
@@ -499,7 +508,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
c_node->start_work = ipp_create_cmd_work();
if (IS_ERR(c_node->start_work)) {
DRM_ERROR("failed to create start work.\n");
- goto err_clear;
+ goto err_remove_id;
}
c_node->stop_work = ipp_create_cmd_work();
@@ -514,7 +523,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
goto err_free_stop;
}
- mutex_init(&c_node->cmd_lock);
+ mutex_init(&c_node->lock);
mutex_init(&c_node->mem_lock);
mutex_init(&c_node->event_lock);
@@ -526,7 +535,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
INIT_LIST_HEAD(&c_node->event_list);
list_splice_init(&priv->event_list, &c_node->event_list);
+ mutex_lock(&ippdrv->cmd_lock);
list_add_tail(&c_node->list, &ippdrv->cmd_list);
+ mutex_unlock(&ippdrv->cmd_lock);
/* make dedicated state without m2m */
if (!ipp_is_m2m_cmd(property->cmd))
@@ -538,18 +549,24 @@ err_free_stop:
kfree(c_node->stop_work);
err_free_start:
kfree(c_node->start_work);
+err_remove_id:
+ ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
kfree(c_node);
return ret;
}
-static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+static void ipp_clean_cmd_node(struct ipp_context *ctx,
+ struct drm_exynos_ipp_cmd_node *c_node)
{
/* delete list */
list_del(&c_node->list);
+ ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
+ c_node->property.prop_id);
+
/* destroy mutex */
- mutex_destroy(&c_node->cmd_lock);
+ mutex_destroy(&c_node->lock);
mutex_destroy(&c_node->mem_lock);
mutex_destroy(&c_node->event_lock);
@@ -567,17 +584,10 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
struct list_head *head;
int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
- mutex_lock(&c_node->mem_lock);
-
for_each_ipp_ops(i) {
/* source/destination memory list */
head = &c_node->mem_list[i];
- if (list_empty(head)) {
- DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
- continue;
- }
-
/* find memory node entry */
list_for_each_entry(m_node, head, list) {
DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
@@ -602,8 +612,6 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
ret = max(count[EXYNOS_DRM_OPS_SRC],
count[EXYNOS_DRM_OPS_DST]);
- mutex_unlock(&c_node->mem_lock);
-
return ret;
}
@@ -646,16 +654,13 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
return -EFAULT;
}
- mutex_lock(&c_node->mem_lock);
-
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* get operations callback */
ops = ippdrv->ops[m_node->ops_id];
if (!ops) {
DRM_ERROR("not support ops.\n");
- ret = -EFAULT;
- goto err_unlock;
+ return -EFAULT;
}
/* set address and enable irq */
@@ -664,12 +669,10 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
m_node->buf_id, IPP_BUF_ENQUEUE);
if (ret) {
DRM_ERROR("failed to set addr.\n");
- goto err_unlock;
+ return ret;
}
}
-err_unlock:
- mutex_unlock(&c_node->mem_lock);
return ret;
}
@@ -684,11 +687,9 @@ static struct drm_exynos_ipp_mem_node
void *addr;
int i;
- mutex_lock(&c_node->mem_lock);
-
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
- goto err_unlock;
+ return ERR_PTR(-ENOMEM);
/* clear base address for error handling */
memset(&buf_info, 0x0, sizeof(buf_info));
@@ -722,15 +723,14 @@ static struct drm_exynos_ipp_mem_node
m_node->filp = file;
m_node->buf_info = buf_info;
+ mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
-
mutex_unlock(&c_node->mem_lock);
+
return m_node;
err_clear:
kfree(m_node);
-err_unlock:
- mutex_unlock(&c_node->mem_lock);
return ERR_PTR(-EFAULT);
}
@@ -747,13 +747,6 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
return -EFAULT;
}
- if (list_empty(&m_node->list)) {
- DRM_ERROR("empty memory node.\n");
- return -ENOMEM;
- }
-
- mutex_lock(&c_node->mem_lock);
-
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* put gem buffer */
@@ -768,8 +761,6 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
list_del(&m_node->list);
kfree(m_node);
- mutex_unlock(&c_node->mem_lock);
-
return 0;
}
@@ -805,7 +796,9 @@ static int ipp_get_event(struct drm_device *drm_dev,
e->base.event = &e->event.base;
e->base.file_priv = file;
e->base.destroy = ipp_free_event;
+ mutex_lock(&c_node->event_lock);
list_add_tail(&e->base.link, &c_node->event_list);
+ mutex_unlock(&c_node->event_lock);
return 0;
}
@@ -816,11 +809,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_send_event *e, *te;
int count = 0;
- if (list_empty(&c_node->event_list)) {
- DRM_DEBUG_KMS("event_list is empty.\n");
- return;
- }
-
+ mutex_lock(&c_node->event_lock);
list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
@@ -841,9 +830,13 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
/* delete list */
list_del(&e->base.link);
kfree(e);
- return;
+ goto out_unlock;
}
}
+
+out_unlock:
+ mutex_unlock(&c_node->event_lock);
+ return;
}
static void ipp_handle_cmd_work(struct device *dev,
@@ -887,7 +880,9 @@ static int ipp_queue_buf_with_run(struct device *dev,
return 0;
}
+ mutex_lock(&c_node->mem_lock);
if (!ipp_check_mem_list(c_node)) {
+ mutex_unlock(&c_node->mem_lock);
DRM_DEBUG_KMS("empty memory.\n");
return 0;
}
@@ -904,10 +899,12 @@ static int ipp_queue_buf_with_run(struct device *dev,
} else {
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
+ mutex_unlock(&c_node->mem_lock);
DRM_ERROR("failed to set m node.\n");
return ret;
}
}
+ mutex_unlock(&c_node->mem_lock);
return 0;
}
@@ -918,15 +915,15 @@ static void ipp_clean_queue_buf(struct drm_device *drm_dev,
{
struct drm_exynos_ipp_mem_node *m_node, *tm_node;
- if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
- /* delete list */
- list_for_each_entry_safe(m_node, tm_node,
- &c_node->mem_list[qbuf->ops_id], list) {
- if (m_node->buf_id == qbuf->buf_id &&
- m_node->ops_id == qbuf->ops_id)
- ipp_put_mem_node(drm_dev, c_node, m_node);
- }
+ /* delete list */
+ mutex_lock(&c_node->mem_lock);
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
}
+ mutex_unlock(&c_node->mem_lock);
}
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
@@ -998,7 +995,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
}
break;
case IPP_BUF_DEQUEUE:
- mutex_lock(&c_node->cmd_lock);
+ mutex_lock(&c_node->lock);
/* put event for destination buffer */
if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
@@ -1006,7 +1003,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
ipp_clean_queue_buf(drm_dev, c_node, qbuf);
- mutex_unlock(&c_node->cmd_lock);
+ mutex_unlock(&c_node->lock);
break;
default:
DRM_ERROR("invalid buffer control.\n");
@@ -1109,12 +1106,12 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
case IPP_CTRL_PLAY:
if (pm_runtime_suspended(ippdrv->dev))
pm_runtime_get_sync(ippdrv->dev);
+
c_node->state = IPP_STATE_START;
cmd_work = c_node->start_work;
cmd_work->ctrl = cmd_ctrl->ctrl;
ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- c_node->state = IPP_STATE_START;
break;
case IPP_CTRL_STOP:
cmd_work = c_node->stop_work;
@@ -1129,10 +1126,12 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node->state = IPP_STATE_STOP;
ippdrv->dedicated = false;
- ipp_clean_cmd_node(c_node);
+ mutex_lock(&ippdrv->cmd_lock);
+ ipp_clean_cmd_node(ctx, c_node);
if (list_empty(&ippdrv->cmd_list))
pm_runtime_put_sync(ippdrv->dev);
+ mutex_unlock(&ippdrv->cmd_lock);
break;
case IPP_CTRL_PAUSE:
cmd_work = c_node->stop_work;
@@ -1260,9 +1259,11 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
/* store command info in ippdrv */
ippdrv->c_node = c_node;
+ mutex_lock(&c_node->mem_lock);
if (!ipp_check_mem_list(c_node)) {
DRM_DEBUG_KMS("empty memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unlock;
}
/* set current property in ippdrv */
@@ -1270,7 +1271,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
if (ret) {
DRM_ERROR("failed to set property.\n");
ippdrv->c_node = NULL;
- return ret;
+ goto err_unlock;
}
/* check command */
@@ -1285,7 +1286,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
if (!m_node) {
DRM_ERROR("failed to get node.\n");
ret = -EFAULT;
- return ret;
+ goto err_unlock;
}
DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
@@ -1293,7 +1294,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
DRM_ERROR("failed to set m node.\n");
- return ret;
+ goto err_unlock;
}
}
break;
@@ -1305,7 +1306,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
DRM_ERROR("failed to set m node.\n");
- return ret;
+ goto err_unlock;
}
}
break;
@@ -1317,14 +1318,16 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
DRM_ERROR("failed to set m node.\n");
- return ret;
+ goto err_unlock;
}
}
break;
default:
DRM_ERROR("invalid operations.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
+ mutex_unlock(&c_node->mem_lock);
DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
@@ -1333,11 +1336,17 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ippdrv->start(ippdrv->dev, property->cmd);
if (ret) {
DRM_ERROR("failed to start ops.\n");
+ ippdrv->c_node = NULL;
return ret;
}
}
return 0;
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ ippdrv->c_node = NULL;
+ return ret;
}
static int ipp_stop_property(struct drm_device *drm_dev,
@@ -1354,6 +1363,8 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* put event */
ipp_put_event(c_node, NULL);
+ mutex_lock(&c_node->mem_lock);
+
/* check command */
switch (property->cmd) {
case IPP_CMD_M2M:
@@ -1361,11 +1372,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* source/destination memory list */
head = &c_node->mem_list[i];
- if (list_empty(head)) {
- DRM_DEBUG_KMS("mem_list is empty.\n");
- break;
- }
-
list_for_each_entry_safe(m_node, tm_node,
head, list) {
ret = ipp_put_mem_node(drm_dev, c_node,
@@ -1381,11 +1387,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* destination memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
- if (list_empty(head)) {
- DRM_DEBUG_KMS("mem_list is empty.\n");
- break;
- }
-
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
@@ -1398,11 +1399,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* source memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
- if (list_empty(head)) {
- DRM_DEBUG_KMS("mem_list is empty.\n");
- break;
- }
-
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
@@ -1418,6 +1414,8 @@ static int ipp_stop_property(struct drm_device *drm_dev,
}
err_clear:
+ mutex_unlock(&c_node->mem_lock);
+
/* stop operations */
if (ippdrv->stop)
ippdrv->stop(ippdrv->dev, property->cmd);
@@ -1446,7 +1444,7 @@ void ipp_sched_cmd(struct work_struct *work)
return;
}
- mutex_lock(&c_node->cmd_lock);
+ mutex_lock(&c_node->lock);
property = &c_node->property;
@@ -1494,7 +1492,7 @@ void ipp_sched_cmd(struct work_struct *work)
DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
err_unlock:
- mutex_unlock(&c_node->cmd_lock);
+ mutex_unlock(&c_node->lock);
}
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
@@ -1524,14 +1522,18 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
return -EINVAL;
}
+ mutex_lock(&c_node->event_lock);
if (list_empty(&c_node->event_list)) {
DRM_DEBUG_KMS("event list is empty.\n");
- return 0;
+ ret = 0;
+ goto err_event_unlock;
}
+ mutex_lock(&c_node->mem_lock);
if (!ipp_check_mem_list(c_node)) {
DRM_DEBUG_KMS("empty memory.\n");
- return 0;
+ ret = 0;
+ goto err_mem_unlock;
}
/* check command */
@@ -1545,7 +1547,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_mem_node, list);
if (!m_node) {
DRM_ERROR("empty memory node.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mem_unlock;
}
tbuf_id[i] = m_node->buf_id;
@@ -1567,7 +1570,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = ipp_find_mem_node(c_node, &qbuf);
if (!m_node) {
DRM_ERROR("empty memory node.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mem_unlock;
}
tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
@@ -1584,7 +1588,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_mem_node, list);
if (!m_node) {
DRM_ERROR("empty memory node.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mem_unlock;
}
tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
@@ -1595,8 +1600,10 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
break;
default:
DRM_ERROR("invalid operations.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mem_unlock;
}
+ mutex_unlock(&c_node->mem_lock);
if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
@@ -1611,11 +1618,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
e = list_first_entry(&c_node->event_list,
struct drm_exynos_ipp_send_event, base.link);
- if (!e) {
- DRM_ERROR("empty event.\n");
- return -EINVAL;
- }
-
do_gettimeofday(&now);
DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
e->event.tv_sec = now.tv_sec;
@@ -1630,11 +1632,18 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ mutex_unlock(&c_node->event_lock);
DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
return 0;
+
+err_mem_unlock:
+ mutex_unlock(&c_node->mem_lock);
+err_event_unlock:
+ mutex_unlock(&c_node->event_lock);
+ return ret;
}
void ipp_sched_event(struct work_struct *work)
@@ -1676,8 +1685,6 @@ void ipp_sched_event(struct work_struct *work)
goto err_completion;
}
- mutex_lock(&c_node->event_lock);
-
ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
if (ret) {
DRM_ERROR("failed to send event.\n");
@@ -1687,8 +1694,6 @@ void ipp_sched_event(struct work_struct *work)
err_completion:
if (ipp_is_m2m_cmd(c_node->property.cmd))
complete(&c_node->start_complete);
-
- mutex_unlock(&c_node->event_lock);
}
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@@ -1699,23 +1704,21 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ u32 ipp_id;
+
ippdrv->drm_dev = drm_dev;
ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
- &ippdrv->ipp_id);
- if (ret) {
+ &ipp_id);
+ if (ret || ipp_id == 0) {
DRM_ERROR("failed to create id.\n");
- goto err_idr;
+ goto err;
}
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
- count++, (int)ippdrv, ippdrv->ipp_id);
+ count++, (int)ippdrv, ipp_id);
- if (ippdrv->ipp_id == 0) {
- DRM_ERROR("failed to get ipp_id[%d]\n",
- ippdrv->ipp_id);
- goto err_idr;
- }
+ ippdrv->prop_list.ipp_id = ipp_id;
/* store parent device for node */
ippdrv->parent_dev = dev;
@@ -1724,39 +1727,46 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
ippdrv->event_workq = ctx->event_workq;
ippdrv->sched_event = ipp_sched_event;
INIT_LIST_HEAD(&ippdrv->cmd_list);
+ mutex_init(&ippdrv->cmd_lock);
if (is_drm_iommu_supported(drm_dev)) {
ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
if (ret) {
DRM_ERROR("failed to activate iommu\n");
- goto err_iommu;
+ goto err;
}
}
}
return 0;
-err_iommu:
+err:
/* get ipp driver entry */
- list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
+ drv_list) {
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, ippdrv->dev);
-err_idr:
- idr_destroy(&ctx->ipp_idr);
- idr_destroy(&ctx->prop_idr);
+ ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
+ ippdrv->prop_list.ipp_id);
+ }
+
return ret;
}
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
struct exynos_drm_ippdrv *ippdrv;
+ struct ipp_context *ctx = get_ipp_context(dev);
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, ippdrv->dev);
+ ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
+ ippdrv->prop_list.ipp_id);
+
ippdrv->drm_dev = NULL;
exynos_drm_ippdrv_unregister(ippdrv);
}
@@ -1787,20 +1797,14 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
- if (list_empty(&exynos_drm_ippdrv_list)) {
- DRM_DEBUG_KMS("ippdrv_list is empty.\n");
- goto err_clear;
- }
-
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- if (list_empty(&ippdrv->cmd_list))
- continue;
-
+ mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry_safe(c_node, tc_node,
&ippdrv->cmd_list, list) {
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
@@ -1820,14 +1824,14 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
}
ippdrv->dedicated = false;
- ipp_clean_cmd_node(c_node);
+ ipp_clean_cmd_node(ctx, c_node);
if (list_empty(&ippdrv->cmd_list))
pm_runtime_put_sync(ippdrv->dev);
}
}
+ mutex_unlock(&ippdrv->cmd_lock);
}
-err_clear:
kfree(priv);
return;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index ab1634befc05..7aaeaae757c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -52,7 +52,7 @@ struct drm_exynos_ipp_cmd_work {
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
- * @cmd_lock: lock for synchronization of access to ioctl.
+ * @lock: lock for synchronization of access to ioctl.
* @mem_lock: lock for synchronization of access to memory nodes.
* @event_lock: lock for synchronization of access to scheduled event.
* @start_complete: completion of start of command.
@@ -68,7 +68,7 @@ struct drm_exynos_ipp_cmd_node {
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
- struct mutex cmd_lock;
+ struct mutex lock;
struct mutex mem_lock;
struct mutex event_lock;
struct completion start_complete;
@@ -83,7 +83,7 @@ struct drm_exynos_ipp_cmd_node {
/*
* A structure of buffer information.
*
- * @gem_objs: Y, Cb, Cr each gem object.
+ * @handles: gem object handle for each of the Y, Cb and Cr planes.
* @base: Y, Cb, Cr each planar address.
*/
struct drm_exynos_ipp_buf_info {
@@ -142,12 +142,12 @@ struct exynos_drm_ipp_ops {
* @parent_dev: parent device information.
* @dev: platform device.
* @drm_dev: drm device.
- * @ipp_id: id of ipp driver.
* @dedicated: dedicated ipp device.
* @ops: source, destination operations.
* @event_workq: event work queue.
* @c_node: current command information.
* @cmd_list: list head for command information.
+ * @cmd_lock: lock for synchronization of access to cmd_list.
 * @prop_list: property information of the current ipp driver.
* @check_property: check property about format, size, buffer.
* @reset: reset ipp block.
@@ -160,13 +160,13 @@ struct exynos_drm_ippdrv {
struct device *parent_dev;
struct device *dev;
struct drm_device *drm_dev;
- u32 ipp_id;
bool dedicated;
struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
struct workqueue_struct *event_workq;
struct drm_exynos_ipp_cmd_node *c_node;
struct list_head cmd_list;
- struct drm_exynos_ipp_prop_list *prop_list;
+ struct mutex cmd_lock;
+ struct drm_exynos_ipp_prop_list prop_list;
int (*check_property)(struct device *dev,
struct drm_exynos_ipp_property *property);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 7b901688defa..f01fbb6dc1f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -158,8 +158,9 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
- } else
+ } else {
DRM_ERROR("the SFR is set illegally\n");
+ }
return IRQ_HANDLED;
}
@@ -469,11 +470,7 @@ static struct exynos_drm_ipp_ops rot_dst_ops = {
static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
- struct drm_exynos_ipp_prop_list *prop_list;
-
- prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list)
- return -ENOMEM;
+ struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -486,8 +483,6 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
prop_list->crop = 0;
prop_list->scale = 0;
- ippdrv->prop_list = prop_list;
-
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 852f2dadaebd..2fb8705d6461 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -51,6 +51,7 @@ struct vidi_context {
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
+ struct exynos_drm_subdrv subdrv;
struct vidi_win_data win_data[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
@@ -294,14 +295,13 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
}
static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
- struct drm_device *drm_dev, int pipe)
+ struct drm_device *drm_dev)
{
struct vidi_context *ctx = mgr->ctx;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
- DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe);
-
- ctx->drm_dev = drm_dev;
- ctx->pipe = pipe;
+ mgr->drm_dev = ctx->drm_dev = drm_dev;
+ mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
@@ -324,7 +324,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
}
static struct exynos_drm_manager_ops vidi_manager_ops = {
- .initialize = vidi_mgr_initialize,
.dpms = vidi_dpms,
.commit = vidi_commit,
.enable_vblank = vidi_enable_vblank,
@@ -533,12 +532,6 @@ static int vidi_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, edid);
}
-static int vidi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
{
struct vidi_context *ctx = ctx_from_connector(connector);
@@ -548,7 +541,6 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.get_modes = vidi_get_modes,
- .mode_valid = vidi_mode_valid,
.best_encoder = vidi_best_encoder,
};
@@ -586,13 +578,38 @@ static struct exynos_drm_display vidi_display = {
.ops = &vidi_display_ops,
};
+static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
+ struct vidi_context *ctx = mgr->ctx;
+ struct drm_crtc *crtc = ctx->crtc;
+ int ret;
+
+ vidi_mgr_initialize(mgr, drm_dev);
+
+ ret = exynos_drm_crtc_create(&vidi_manager);
+ if (ret) {
+ DRM_ERROR("failed to create crtc.\n");
+ return ret;
+ }
+
+ ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
+ if (ret) {
+ crtc->funcs->destroy(crtc);
+ DRM_ERROR("failed to create encoder and connector.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int vidi_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
+ struct exynos_drm_subdrv *subdrv;
struct vidi_context *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -607,28 +624,43 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &vidi_manager);
- ret = device_create_file(dev, &dev_attr_connection);
- if (ret < 0)
- DRM_INFO("failed to create connection sysfs.\n");
+ subdrv = &ctx->subdrv;
+ subdrv->dev = &pdev->dev;
+ subdrv->probe = vidi_subdrv_probe;
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register drm vidi device\n");
+ return ret;
+ }
- exynos_drm_manager_register(&vidi_manager);
- exynos_drm_display_register(&vidi_display);
+ ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ if (ret < 0) {
+ exynos_drm_subdrv_unregister(subdrv);
+ DRM_INFO("failed to create connection sysfs.\n");
+ }
return 0;
}
static int vidi_remove(struct platform_device *pdev)
{
- struct vidi_context *ctx = platform_get_drvdata(pdev);
-
- exynos_drm_display_unregister(&vidi_display);
- exynos_drm_manager_unregister(&vidi_manager);
+ struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
+ struct vidi_context *ctx = mgr->ctx;
+ struct drm_encoder *encoder = ctx->encoder;
+ struct drm_crtc *crtc = mgr->crtc;
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
+
+ return -EINVAL;
}
+ crtc->funcs->destroy(crtc);
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&ctx->connector);
+
return 0;
}
@@ -640,3 +672,31 @@ struct platform_driver vidi_driver = {
.owner = THIS_MODULE,
},
};
+
+int exynos_drm_probe_vidi(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_register_simple("exynos-drm-vidi", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ ret = platform_driver_register(&vidi_driver);
+ if (ret) {
+ platform_device_unregister(pdev);
+ return ret;
+ }
+
+ return ret;
+}
+
+void exynos_drm_remove_vidi(void)
+{
+ struct vidi_context *ctx = vidi_manager.ctx;
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct platform_device *pdev = to_platform_device(subdrv->dev);
+
+ platform_driver_unregister(&vidi_driver);
+ platform_device_unregister(pdev);
+}
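/*
 * Editor's note: exynos_drm_probe_vidi() registers the platform device
 * before the driver and unregisters the device again if the driver
 * registration fails; exynos_drm_remove_vidi() tears both down in
 * reverse order, driver first, then device.
 */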
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 9a6d652a3ef2..aa259b0a873a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -33,13 +33,17 @@
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/i2c.h>
+#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/hdmi.h>
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
#include "exynos_mixer.h"
#include <linux/gpio.h>
@@ -48,6 +52,8 @@
#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
+#define HOTPLUG_DEBOUNCE_MS 1100
+
/* AVI header and aspect ratio */
#define HDMI_AVI_VERSION 0x02
#define HDMI_AVI_LENGTH 0x0D
@@ -66,6 +72,8 @@ enum hdmi_type {
struct hdmi_driver_data {
unsigned int type;
+ const struct hdmiphy_config *phy_confs;
+ unsigned int phy_conf_count;
unsigned int is_apb_phy:1;
};
@@ -74,7 +82,6 @@ struct hdmi_resources {
struct clk *sclk_hdmi;
struct clk *sclk_pixel;
struct clk *sclk_hdmiphy;
- struct clk *hdmiphy;
struct clk *mout_hdmi;
struct regulator_bulk_data *regul_bulk;
int regul_count;
@@ -185,17 +192,23 @@ struct hdmi_context {
void __iomem *regs;
int irq;
+ struct delayed_work hotplug_work;
struct i2c_adapter *ddc_adpt;
struct i2c_client *hdmiphy_port;
/* current hdmiphy conf regs */
+ struct drm_display_mode current_mode;
struct hdmi_conf_regs mode_conf;
struct hdmi_resources res;
int hpd_gpio;
+ void __iomem *regs_hdmiphy;
+ const struct hdmiphy_config *phy_confs;
+ unsigned int phy_conf_count;
+ struct regmap *pmureg;
enum hdmi_type type;
};
@@ -204,14 +217,6 @@ struct hdmiphy_config {
u8 conf[32];
};
-struct hdmi_driver_data exynos4212_hdmi_driver_data = {
- .type = HDMI_TYPE14,
-};
-
-struct hdmi_driver_data exynos5_hdmi_driver_data = {
- .type = HDMI_TYPE14,
-};
-
/* list of phy config settings */
static const struct hdmiphy_config hdmiphy_v13_configs[] = {
{
@@ -319,18 +324,18 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
{
.pixel_clock = 71000000,
.conf = {
- 0x01, 0x91, 0x1e, 0x15, 0x40, 0x3c, 0xce, 0x08,
- 0x04, 0x20, 0xb2, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x01, 0xd1, 0x3b, 0x35, 0x40, 0x0c, 0x04, 0x08,
+ 0x85, 0xa0, 0x63, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 73250000,
.conf = {
- 0x01, 0xd1, 0x1f, 0x15, 0x40, 0x18, 0xe9, 0x08,
- 0x02, 0xa0, 0xb7, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x01, 0xd1, 0x3d, 0x35, 0x40, 0x18, 0x02, 0x08,
+ 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
},
},
@@ -362,15 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
},
{
- .pixel_clock = 88750000,
- .conf = {
- 0x01, 0x91, 0x25, 0x17, 0x40, 0x30, 0xfe, 0x08,
- 0x06, 0x20, 0xde, 0xd8, 0x45, 0xa0, 0xac, 0x80,
- 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0x8a, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
- },
- },
- {
.pixel_clock = 106500000,
.conf = {
0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
@@ -391,18 +387,18 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
{
.pixel_clock = 115500000,
.conf = {
- 0x01, 0xd1, 0x30, 0x1a, 0x40, 0x40, 0x10, 0x04,
- 0x04, 0xa0, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
- 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x01, 0xd1, 0x30, 0x12, 0x40, 0x40, 0x10, 0x08,
+ 0x80, 0x80, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
{
.pixel_clock = 119000000,
.conf = {
- 0x01, 0x91, 0x32, 0x14, 0x40, 0x60, 0xd8, 0x08,
- 0x06, 0x20, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
- 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x01, 0xd1, 0x32, 0x1a, 0x40, 0x30, 0xd8, 0x08,
+ 0x04, 0xa0, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
},
},
@@ -426,6 +422,183 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
+static const struct hdmiphy_config hdmiphy_5420_configs[] = {
+ {
+ .pixel_clock = 25200000,
+ .conf = {
+ 0x01, 0x52, 0x3F, 0x55, 0x40, 0x01, 0x00, 0xC8,
+ 0x82, 0xC8, 0xBD, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x06, 0x80, 0x01, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xF4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0xD1, 0x22, 0x51, 0x40, 0x08, 0xFC, 0xE0,
+ 0x98, 0xE8, 0xCB, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x06, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0xD1, 0x2D, 0x72, 0x40, 0x64, 0x12, 0xC8,
+ 0x43, 0xE8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x26, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 36000000,
+ .conf = {
+ 0x01, 0x51, 0x2D, 0x55, 0x40, 0x40, 0x00, 0xC8,
+ 0x02, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xAB, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0xD1, 0x21, 0x31, 0x40, 0x3C, 0x28, 0xC8,
+ 0x87, 0xE8, 0xC8, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x9A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0xD1, 0x36, 0x34, 0x40, 0x0C, 0x04, 0xC8,
+ 0x82, 0xE8, 0x45, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xBD, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 71000000,
+ .conf = {
+ 0x01, 0xD1, 0x3B, 0x35, 0x40, 0x0C, 0x04, 0xC8,
+ 0x85, 0xE8, 0x63, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x57, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 73250000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x78, 0x8D, 0xC8,
+ 0x81, 0xE8, 0xB7, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xA8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0xC8,
+ 0x81, 0xE8, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
+ 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 83500000,
+ .conf = {
+ 0x01, 0xD1, 0x23, 0x11, 0x40, 0x0C, 0xFB, 0xC8,
+ 0x85, 0xE8, 0xD1, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x4A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 88750000,
+ .conf = {
+ 0x01, 0xD1, 0x25, 0x11, 0x40, 0x18, 0xFF, 0xC8,
+ 0x83, 0xE8, 0xDE, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x45, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 106500000,
+ .conf = {
+ 0x01, 0xD1, 0x2C, 0x12, 0x40, 0x0C, 0x09, 0xC8,
+ 0x84, 0xE8, 0x0A, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2D, 0x15, 0x40, 0x01, 0x00, 0xC8,
+ 0x82, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0xC7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 115500000,
+ .conf = {
+ 0x01, 0xD1, 0x30, 0x14, 0x40, 0x0C, 0x03, 0xC8,
+ 0x88, 0xE8, 0x21, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x6A, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 146250000,
+ .conf = {
+ 0x01, 0xD1, 0x3D, 0x15, 0x40, 0x18, 0xFD, 0xC8,
+ 0x83, 0xE8, 0x6E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66,
+ 0x54, 0x54, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0xD1, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66,
+ 0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 0x01, 0x80,
+ },
+ },
+};
+
+static struct hdmi_driver_data exynos5420_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .phy_confs = hdmiphy_5420_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_5420_configs),
+ .is_apb_phy = 1,
+};
+
+static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .phy_confs = hdmiphy_v14_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v14_configs),
+ .is_apb_phy = 0,
+};
+
+static struct hdmi_driver_data exynos5_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .phy_confs = hdmiphy_v13_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
+ .is_apb_phy = 0,
+};
+
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -445,6 +618,48 @@ static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
writel(value, hdata->regs + reg_id);
}
+static int hdmiphy_reg_writeb(struct hdmi_context *hdata,
+ u32 reg_offset, u8 value)
+{
+ if (hdata->hdmiphy_port) {
+ u8 buffer[2];
+ int ret;
+
+ buffer[0] = reg_offset;
+ buffer[1] = value;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ if (ret == 2)
+ return 0;
+ return ret;
+ } else {
+ writeb(value, hdata->regs_hdmiphy + (reg_offset<<2));
+ return 0;
+ }
+}
+
+static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
+ u32 reg_offset, const u8 *buf, u32 len)
+{
+ if ((reg_offset + len) > 32)
+ return -EINVAL;
+
+ if (hdata->hdmiphy_port) {
+ int ret;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, buf, len);
+ if (ret == len)
+ return 0;
+ return ret;
+ } else {
+ int i;
+ for (i = 0; i < len; i++)
+ writeb(buf[i], hdata->regs_hdmiphy +
+ ((reg_offset + i)<<2));
+ return 0;
+ }
+}
+
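/*
 * Editor's note: the two helpers above hide whether the PHY sits behind
 * an I2C client or a memory-mapped APB window (is_apb_phy), so callers
 * never branch on the bus type:
 *
 *	hdmiphy_reg_write_buf(hdata, 0, conf, 32);
 *	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
 *			   HDMI_PHY_DISABLE_MODE_SET);
 */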
static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
{
#define DUMPREG(reg_id) \
@@ -809,6 +1024,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
{
struct hdmi_context *hdata = ctx_from_connector(connector);
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
return hdata->hpd ? connector_status_connected :
connector_status_disconnected;
}
@@ -848,20 +1065,10 @@ static int hdmi_get_modes(struct drm_connector *connector)
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
- const struct hdmiphy_config *confs;
- int count, i;
-
- if (hdata->type == HDMI_TYPE13) {
- confs = hdmiphy_v13_configs;
- count = ARRAY_SIZE(hdmiphy_v13_configs);
- } else if (hdata->type == HDMI_TYPE14) {
- confs = hdmiphy_v14_configs;
- count = ARRAY_SIZE(hdmiphy_v14_configs);
- } else
- return -EINVAL;
+ int i;
- for (i = 0; i < count; i++)
- if (confs[i].pixel_clock == pixel_clock)
+ for (i = 0; i < hdata->phy_conf_count; i++)
+ if (hdata->phy_confs[i].pixel_clock == pixel_clock)
return i;
DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
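/*
 * Editor's note: the lookup is now a linear scan over the per-variant
 * table installed at probe time, e.g. (clock value taken from the
 * tables above):
 *
 *	i = hdmi_find_phy_conf(hdata, 74250000);
 *	if (i >= 0)
 *		hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
 */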
@@ -928,16 +1135,6 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static int hdmi_initialize(struct exynos_drm_display *display,
- struct drm_device *drm_dev)
-{
- struct hdmi_context *hdata = display->ctx;
-
- hdata->drm_dev = drm_dev;
-
- return 0;
-}
-
static void hdmi_mode_fixup(struct exynos_drm_display *display,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -1136,20 +1333,15 @@ static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
}
-static void hdmi_conf_reset(struct hdmi_context *hdata)
+static void hdmi_start(struct hdmi_context *hdata, bool start)
{
- u32 reg;
+ u32 val = start ? HDMI_TG_EN : 0;
- if (hdata->type == HDMI_TYPE13)
- reg = HDMI_V13_CORE_RSTOUT;
- else
- reg = HDMI_CORE_RSTOUT;
+ if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= HDMI_FIELD_EN;
- /* resetting HDMI core */
- hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
- usleep_range(10000, 12000);
- hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
- usleep_range(10000, 12000);
+ hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN);
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, val, HDMI_TG_EN | HDMI_FIELD_EN);
}
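/*
 * Editor's note: hdmi_start() replaces the old per-version enable
 * sequences; the interlace decision now comes from current_mode, which
 * hdmi_mode_set() preserves with drm_mode_copy() further below.
 */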
static void hdmi_conf_init(struct hdmi_context *hdata)
@@ -1163,6 +1355,8 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
/* choose HDMI mode */
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+	/* Apply video preamble and guard band in HDMI mode only */
+ hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
@@ -1286,12 +1480,7 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
clk_prepare_enable(hdata->res.sclk_hdmi);
/* enable HDMI and timing generator */
- hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
- if (core->int_pro_mode[0])
- hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
- HDMI_FIELD_EN);
- else
- hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+ hdmi_start(hdata, true);
}
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
@@ -1453,12 +1642,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
clk_prepare_enable(hdata->res.sclk_hdmi);
/* enable HDMI and timing generator */
- hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
- if (core->int_pro_mode[0])
- hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
- HDMI_FIELD_EN);
- else
- hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+ hdmi_start(hdata, true);
}
static void hdmi_mode_apply(struct hdmi_context *hdata)
@@ -1499,32 +1683,51 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
static void hdmiphy_poweron(struct hdmi_context *hdata)
{
- if (hdata->type == HDMI_TYPE14)
- hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
- HDMI_PHY_POWER_OFF_EN);
+ if (hdata->type != HDMI_TYPE14)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
+ /* Phy Power On */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
+ HDMI_PHY_POWER_ON);
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_DISABLE_MODE_SET);
+ /* PHY SW Reset */
+ hdmiphy_conf_reset(hdata);
}
static void hdmiphy_poweroff(struct hdmi_context *hdata)
{
- if (hdata->type == HDMI_TYPE14)
- hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
- HDMI_PHY_POWER_OFF_EN);
+ if (hdata->type != HDMI_TYPE14)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* PHY SW Reset */
+ hdmiphy_conf_reset(hdata);
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
+
+ /* PHY Power Off */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_POWER,
+ HDMI_PHY_POWER_OFF);
+
+ /* For PHY Mode Setting */
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_DISABLE_MODE_SET);
}
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
- const u8 *hdmiphy_data;
- u8 buffer[32];
- u8 operation[2];
- u8 read_buffer[32] = {0, };
int ret;
int i;
- if (!hdata->hdmiphy_port) {
- DRM_ERROR("hdmiphy is not attached\n");
- return;
- }
-
/* pixel clock */
i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
if (i < 0) {
@@ -1532,39 +1735,21 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
return;
}
- if (hdata->type == HDMI_TYPE13)
- hdmiphy_data = hdmiphy_v13_configs[i].conf;
- else
- hdmiphy_data = hdmiphy_v14_configs[i].conf;
-
- memcpy(buffer, hdmiphy_data, 32);
- ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
- if (ret != 32) {
- DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+ ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+ if (ret) {
+ DRM_ERROR("failed to configure hdmiphy\n");
return;
}
usleep_range(10000, 12000);
- /* operation mode */
- operation[0] = 0x1f;
- operation[1] = 0x80;
-
- ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
- if (ret != 2) {
+ ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_DISABLE_MODE_SET);
+ if (ret) {
DRM_ERROR("failed to enable hdmiphy\n");
return;
}
- ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
- if (ret < 0) {
- DRM_ERROR("failed to read hdmiphy config\n");
- return;
- }
-
- for (i = 0; i < ret; i++)
- DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
- "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
}
static void hdmi_conf_apply(struct hdmi_context *hdata)
@@ -1573,7 +1758,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_apply(hdata);
mutex_lock(&hdata->hdmi_mutex);
- hdmi_conf_reset(hdata);
+ hdmi_start(hdata, false);
hdmi_conf_init(hdata);
mutex_unlock(&hdata->hdmi_mutex);
@@ -1814,6 +1999,9 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
		m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
		"INTERLACED" : "PROGRESSIVE");
+ /* preserve mode information for later use. */
+ drm_mode_copy(&hdata->current_mode, mode);
+
if (hdata->type == HDMI_TYPE13)
hdmi_v13_mode_set(hdata, mode);
else
@@ -1854,7 +2042,10 @@ static void hdmi_poweron(struct exynos_drm_display *display)
if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
DRM_DEBUG_KMS("failed to enable regulator bulk\n");
- clk_prepare_enable(res->hdmiphy);
+ /* set pmu hdmiphy control bit to enable hdmiphy */
+ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
+ PMU_HDMI_PHY_ENABLE_BIT, 1);
+
clk_prepare_enable(res->hdmi);
clk_prepare_enable(res->sclk_hdmi);
@@ -1872,16 +2063,20 @@ static void hdmi_poweroff(struct exynos_drm_display *display)
goto out;
mutex_unlock(&hdata->hdmi_mutex);
- /*
- * The TV power domain needs any condition of hdmiphy to turn off and
- * its reset state seems to meet the condition.
- */
- hdmiphy_conf_reset(hdata);
+ /* HDMI System Disable */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
+
hdmiphy_poweroff(hdata);
+ cancel_delayed_work(&hdata->hotplug_work);
+
clk_disable_unprepare(res->sclk_hdmi);
clk_disable_unprepare(res->hdmi);
- clk_disable_unprepare(res->hdmiphy);
+
+ /* reset pmu hdmiphy control bit to disable hdmiphy */
+ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
+ PMU_HDMI_PHY_ENABLE_BIT, 0);
+
regulator_bulk_disable(res->regul_count, res->regul_bulk);
pm_runtime_put_sync(hdata->dev);
@@ -1895,6 +2090,11 @@ out:
static void hdmi_dpms(struct exynos_drm_display *display, int mode)
{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_encoder *encoder = hdata->encoder;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc_helper_funcs *funcs = NULL;
+
DRM_DEBUG_KMS("mode %d\n", mode);
switch (mode) {
@@ -1904,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ /*
+		 * The SFRs of VP and Mixer are updated on the vertical sync
+		 * of the timing generator, which is a part of HDMI, so the
+		 * sequence for disabling the TV subsystem should be:
+		 * VP -> Mixer -> HDMI
+		 *
+		 * The code below disables Mixer and VP (if used) prior to
+		 * disabling HDMI.
+ */
+ if (crtc)
+ funcs = crtc->helper_private;
+ if (funcs && funcs->dpms)
+ (*funcs->dpms)(crtc, mode);
+
hdmi_poweroff(display);
break;
default:
@@ -1913,7 +2127,6 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
}
static struct exynos_drm_display_ops hdmi_display_ops = {
- .initialize = hdmi_initialize,
.create_connector = hdmi_create_connector,
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
@@ -1926,9 +2139,11 @@ static struct exynos_drm_display hdmi_display = {
.ops = &hdmi_display_ops,
};
-static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+static void hdmi_hotplug_work_func(struct work_struct *work)
{
- struct hdmi_context *hdata = arg;
+ struct hdmi_context *hdata;
+
+ hdata = container_of(work, struct hdmi_context, hotplug_work.work);
mutex_lock(&hdata->hdmi_mutex);
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
@@ -1936,6 +2151,14 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
if (hdata->drm_dev)
drm_helper_hpd_irq_event(hdata->drm_dev);
+}
+
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+{
+ struct hdmi_context *hdata = arg;
+
+ mod_delayed_work(system_wq, &hdata->hotplug_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
}
@@ -1954,37 +2177,35 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
DRM_DEBUG_KMS("HDMI resource init\n");
- memset(res, 0, sizeof(*res));
-
/* get clocks, power */
res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
+ ret = PTR_ERR(res->hdmi);
goto fail;
}
res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+ ret = PTR_ERR(res->sclk_hdmi);
goto fail;
}
res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+ ret = PTR_ERR(res->sclk_pixel);
goto fail;
}
res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
- goto fail;
- }
- res->hdmiphy = devm_clk_get(dev, "hdmiphy");
- if (IS_ERR(res->hdmiphy)) {
- DRM_ERROR("failed to get clock 'hdmiphy'\n");
+ ret = PTR_ERR(res->sclk_hdmiphy);
goto fail;
}
res->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
if (IS_ERR(res->mout_hdmi)) {
DRM_ERROR("failed to get clock 'mout_hdmi'\n");
+ ret = PTR_ERR(res->mout_hdmi);
goto fail;
}
@@ -1992,8 +2213,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
- if (!res->regul_bulk)
+ if (!res->regul_bulk) {
+ ret = -ENOMEM;
goto fail;
+ }
for (i = 0; i < ARRAY_SIZE(supply); ++i) {
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
@@ -2001,14 +2224,14 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
- goto fail;
+ return ret;
}
res->regul_count = ARRAY_SIZE(supply);
- return 0;
+ return ret;
fail:
DRM_ERROR("HDMI resource init - failed\n");
- return -ENODEV;
+ return ret;
}
static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
@@ -2043,42 +2266,105 @@ static struct of_device_id hdmi_match_types[] = {
.compatible = "samsung,exynos4212-hdmi",
.data = &exynos4212_hdmi_driver_data,
}, {
+ .compatible = "samsung,exynos5420-hdmi",
+ .data = &exynos5420_hdmi_driver_data,
+ }, {
/* end node */
}
};
+static int hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm_dev = data;
+ struct hdmi_context *hdata;
+
+ hdata = hdmi_display.ctx;
+ hdata->drm_dev = drm_dev;
+
+ return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
+}
+
+static void hdmi_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct exynos_drm_display *display = get_hdmi_display(dev);
+ struct drm_encoder *encoder = display->encoder;
+ struct hdmi_context *hdata = display->ctx;
+
+ encoder->funcs->destroy(encoder);
+ drm_connector_cleanup(&hdata->connector);
+}
+
+static const struct component_ops hdmi_component_ops = {
+ .bind = hdmi_bind,
+ .unbind = hdmi_unbind,
+};
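For readers new to the component framework this driver is being converted
to, here is a minimal sketch with hypothetical my_* names (3.16-era API):
probe merely registers the component, and the real DRM setup waits for
.bind, which the master invokes once every sub-device has shown up:

#include <linux/component.h>
#include <linux/platform_device.h>

static int my_bind(struct device *dev, struct device *master, void *data)
{
	/* data is the drm_device handed down by the master's bind;
	 * create the encoder/connector against it here */
	return 0;
}

static void my_unbind(struct device *dev, struct device *master, void *data)
{
	/* tear down whatever my_bind() created */
}

static const struct component_ops my_component_ops = {
	.bind	= my_bind,
	.unbind	= my_unbind,
};

static int my_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &my_component_ops);
}

static int my_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &my_component_ops);
	return 0;
}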
+
+static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev)
+{
+ const char *compatible_str = "samsung,exynos4210-hdmiddc";
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, compatible_str);
+ if (np)
+ return of_get_next_parent(np);
+
+ return NULL;
+}
+
+static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
+{
+ const char *compatible_str = "samsung,exynos4212-hdmiphy";
+
+ return of_find_compatible_node(NULL, NULL, compatible_str);
+}
+
static int hdmi_probe(struct platform_device *pdev)
{
+ struct device_node *ddc_node, *phy_node;
+ struct s5p_hdmi_platform_data *pdata;
+ struct hdmi_driver_data *drv_data;
+ const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
- struct s5p_hdmi_platform_data *pdata;
struct resource *res;
- const struct of_device_id *match;
- struct device_node *ddc_node, *phy_node;
- struct hdmi_driver_data *drv_data;
int ret;
- if (!dev->of_node)
- return -ENODEV;
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ hdmi_display.type);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node) {
+ ret = -ENODEV;
+ goto err_del_component;
+ }
pdata = drm_hdmi_dt_parse_pdata(dev);
- if (!pdata)
- return -EINVAL;
+ if (!pdata) {
+ ret = -EINVAL;
+ goto err_del_component;
+ }
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
- if (!hdata)
- return -ENOMEM;
+ if (!hdata) {
+ ret = -ENOMEM;
+ goto err_del_component;
+ }
mutex_init(&hdata->hdmi_mutex);
platform_set_drvdata(pdev, &hdmi_display);
match = of_match_node(hdmi_match_types, dev->of_node);
- if (!match)
- return -ENODEV;
+ if (!match) {
+ ret = -ENODEV;
+ goto err_del_component;
+ }
drv_data = (struct hdmi_driver_data *)match->data;
hdata->type = drv_data->type;
+ hdata->phy_confs = drv_data->phy_confs;
+ hdata->phy_conf_count = drv_data->phy_conf_count;
hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
@@ -2086,35 +2372,44 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
DRM_ERROR("hdmi_resources_init failed\n");
- return -EINVAL;
+ return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdata->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(hdata->regs))
- return PTR_ERR(hdata->regs);
+ if (IS_ERR(hdata->regs)) {
+ ret = PTR_ERR(hdata->regs);
+ goto err_del_component;
+ }
ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- return ret;
+ goto err_del_component;
}
+ ddc_node = hdmi_legacy_ddc_dt_binding(dev);
+ if (ddc_node)
+ goto out_get_ddc_adpt;
+
/* DDC i2c driver */
ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
if (!ddc_node) {
DRM_ERROR("Failed to find ddc node in device tree\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_del_component;
}
+
+out_get_ddc_adpt:
hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
if (!hdata->ddc_adpt) {
DRM_ERROR("Failed to get ddc i2c adapter by node\n");
- return -ENODEV;
+ return -EPROBE_DEFER;
}
- /* Not support APB PHY yet. */
- if (drv_data->is_apb_phy)
- return -EPERM;
+ phy_node = hdmi_legacy_phy_dt_binding(dev);
+ if (phy_node)
+ goto out_get_phy_port;
/* hdmiphy i2c driver */
phy_node = of_parse_phandle(dev->of_node, "phy", 0);
@@ -2123,11 +2418,22 @@ static int hdmi_probe(struct platform_device *pdev)
ret = -ENODEV;
goto err_ddc;
}
- hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
- if (!hdata->hdmiphy_port) {
- DRM_ERROR("Failed to get hdmi phy i2c client from node\n");
- ret = -ENODEV;
- goto err_ddc;
+
+out_get_phy_port:
+ if (drv_data->is_apb_phy) {
+ hdata->regs_hdmiphy = of_iomap(phy_node, 0);
+ if (!hdata->regs_hdmiphy) {
+ DRM_ERROR("failed to ioremap hdmi phy\n");
+ ret = -ENOMEM;
+ goto err_ddc;
+ }
+ } else {
+ hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
+ if (!hdata->hdmiphy_port) {
+ DRM_ERROR("Failed to get hdmi phy i2c client\n");
+ ret = -EPROBE_DEFER;
+ goto err_ddc;
+ }
}
hdata->irq = gpio_to_irq(hdata->hpd_gpio);
@@ -2139,6 +2445,8 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+ INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
+
ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -2148,30 +2456,51 @@ static int hdmi_probe(struct platform_device *pdev)
goto err_hdmiphy;
}
- pm_runtime_enable(dev);
+ hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(hdata->pmureg)) {
+ DRM_ERROR("syscon regmap lookup failed.\n");
+ ret = -EPROBE_DEFER;
+ goto err_hdmiphy;
+ }
+ pm_runtime_enable(dev);
hdmi_display.ctx = hdata;
- exynos_drm_display_register(&hdmi_display);
- return 0;
+ ret = component_add(&pdev->dev, &hdmi_component_ops);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ return ret;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(dev);
err_hdmiphy:
- put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->hdmiphy_port)
+ put_device(&hdata->hdmiphy_port->dev);
err_ddc:
put_device(&hdata->ddc_adpt->dev);
+
+err_del_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+
return ret;
}
static int hdmi_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct exynos_drm_display *display = get_hdmi_display(dev);
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = hdmi_display.ctx;
+
+ cancel_delayed_work_sync(&hdata->hotplug_work);
put_device(&hdata->hdmiphy_port->dev);
put_device(&hdata->ddc_adpt->dev);
+
pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &hdmi_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
deleted file mode 100644
index 0ddf3957de15..000000000000
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_HDMI_H_
-#define _EXYNOS_HDMI_H_
-
-void hdmi_attach_ddc_client(struct i2c_client *ddc);
-void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
-
-extern struct i2c_driver hdmiphy_driver;
-extern struct i2c_driver ddc_driver;
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
deleted file mode 100644
index 59abb1494ceb..000000000000
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Authors:
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- * Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <drm/drmP.h>
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/of.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_hdmi.h"
-
-
-static int hdmiphy_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- hdmi_attach_hdmiphy_client(client);
-
- dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
- "into i2c adapter successfully\n");
-
- return 0;
-}
-
-static int hdmiphy_remove(struct i2c_client *client)
-{
- dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
- "from i2c adapter successfully\n");
-
- return 0;
-}
-
-static struct of_device_id hdmiphy_match_types[] = {
- {
- .compatible = "samsung,exynos5-hdmiphy",
- }, {
- .compatible = "samsung,exynos4210-hdmiphy",
- }, {
- .compatible = "samsung,exynos4212-hdmiphy",
- }, {
- /* end node */
- }
-};
-
-struct i2c_driver hdmiphy_driver = {
- .driver = {
- .name = "exynos-hdmiphy",
- .owner = THIS_MODULE,
- .of_match_table = hdmiphy_match_types,
- },
- .probe = hdmiphy_probe,
- .remove = hdmiphy_remove,
- .command = NULL,
-};
-EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index ce288818d2c0..7529946d0a74 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -31,6 +31,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
+#include <linux/component.h>
#include <drm/exynos_drm.h>
@@ -376,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
mixer_regs_dump(ctx);
}
+static void mixer_stop(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int timeout = 20;
+
+ mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+ while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+ --timeout)
+ usleep_range(10000, 12000);
+
+ mixer_regs_dump(ctx);
+}
+
static void vp_video_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
@@ -496,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
static void mixer_layer_update(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- u32 val;
- val = mixer_reg_read(res, MXR_CFG);
-
- /* allow one update per vsync only */
- if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
- mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -830,13 +840,15 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
}
static int mixer_initialize(struct exynos_drm_manager *mgr,
- struct drm_device *drm_dev, int pipe)
+ struct drm_device *drm_dev)
{
int ret;
struct mixer_context *mixer_ctx = mgr->ctx;
+ struct exynos_drm_private *priv;
+ priv = drm_dev->dev_private;
- mixer_ctx->drm_dev = drm_dev;
- mixer_ctx->pipe = pipe;
+ mgr->drm_dev = mixer_ctx->drm_dev = drm_dev;
+ mgr->pipe = mixer_ctx->pipe = priv->pipe++;
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
@@ -1007,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
+ drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
atomic_set(&mixer_ctx->wait_vsync_event, 1);
/*
@@ -1017,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
!atomic_read(&mixer_ctx->wait_vsync_event),
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+ drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
}
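A note on the get/put pair added above: without an elevated reference the
DRM core is free to disable the pipe's vblank interrupt mid-wait, so the
wake-up event may never fire. A sketch of the bracket with a hypothetical
helper:

#include <drm/drmP.h>
#include <linux/wait.h>

static void my_wait_one_vblank(struct drm_device *dev, int pipe,
			       wait_queue_head_t *wq, atomic_t *pending)
{
	if (drm_vblank_get(dev, pipe))
		return;			/* vblank IRQ not available */

	atomic_set(pending, 1);
	/* the vblank handler is expected to clear *pending and wake wq */
	wait_event_timeout(*wq, !atomic_read(pending), HZ / 20);

	drm_vblank_put(dev, pipe);	/* balanced release */
}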
static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1058,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
mutex_unlock(&ctx->mixer_mutex);
return;
}
- ctx->powered = true;
+
mutex_unlock(&ctx->mixer_mutex);
pm_runtime_get_sync(ctx->dev);
@@ -1069,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
clk_prepare_enable(res->sclk_mixer);
}
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
@@ -1081,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
+ if (!ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
mutex_unlock(&ctx->mixer_mutex);
+ mixer_stop(ctx);
mixer_window_suspend(mgr);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+ mutex_unlock(&ctx->mixer_mutex);
+
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
@@ -1096,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
}
pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
}
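The two hunks above reorder the powered flag against the clock calls. A
sketch of the resulting discipline with a hypothetical my_ctx: the flag
only reads true while the clocks are live, so any path that checks it
under the mutex can never touch a clock-gated register block:

#include <linux/clk.h>
#include <linux/mutex.h>

struct my_ctx {
	struct mutex lock;
	struct clk *clk;
	bool powered;
};

static void my_poweron(struct my_ctx *ctx)
{
	clk_prepare_enable(ctx->clk);		/* hardware up first... */

	mutex_lock(&ctx->lock);
	ctx->powered = true;			/* ...then advertise it */
	mutex_unlock(&ctx->lock);
}

static void my_poweroff(struct my_ctx *ctx)
{
	mutex_lock(&ctx->lock);
	ctx->powered = false;			/* stop new users first... */
	mutex_unlock(&ctx->lock);

	clk_disable_unprepare(ctx->clk);	/* ...then gate the clock */
}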
static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
@@ -1142,8 +1165,6 @@ int mixer_check_mode(struct drm_display_mode *mode)
}
static struct exynos_drm_manager_ops mixer_manager_ops = {
- .initialize = mixer_initialize,
- .remove = mixer_mgr_remove,
.dpms = mixer_dpms,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
@@ -1200,11 +1221,13 @@ static struct of_device_id mixer_match_types[] = {
}
};
-static int mixer_probe(struct platform_device *pdev)
+static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
- struct device *dev = &pdev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm_dev = data;
struct mixer_context *ctx;
struct mixer_drv_data *drv;
+ int ret;
dev_info(dev, "probe start\n");
@@ -1233,19 +1256,61 @@ static int mixer_probe(struct platform_device *pdev)
atomic_set(&ctx->wait_vsync_event, 0);
mixer_manager.ctx = ctx;
+ ret = mixer_initialize(&mixer_manager, drm_dev);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, &mixer_manager);
- exynos_drm_manager_register(&mixer_manager);
+ ret = exynos_drm_crtc_create(&mixer_manager);
+ if (ret) {
+ mixer_mgr_remove(&mixer_manager);
+ return ret;
+ }
pm_runtime_enable(dev);
return 0;
}
-static int mixer_remove(struct platform_device *pdev)
+static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
- dev_info(&pdev->dev, "remove successful\n");
+ struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+ struct drm_crtc *crtc = mgr->crtc;
+
+ dev_info(dev, "remove successful\n");
- pm_runtime_disable(&pdev->dev);
+ mixer_mgr_remove(mgr);
+
+ pm_runtime_disable(dev);
+
+ crtc->funcs->destroy(crtc);
+}
+
+static const struct component_ops mixer_component_ops = {
+ .bind = mixer_bind,
+ .unbind = mixer_unbind,
+};
+
+static int mixer_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+ mixer_manager.type);
+ if (ret)
+ return ret;
+
+ ret = component_add(&pdev->dev, &mixer_component_ops);
+ if (ret)
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return ret;
+}
+
+static int mixer_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mixer_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index ef1b3eb3ba6e..3f35ac6d8a47 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -578,4 +578,20 @@
#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074)
#define HDMI_TG_3D HDMI_TG_BASE(0x00F0)
+/* HDMI PHY Register Offsets */
+#define HDMIPHY_POWER (0x74 >> 2)
+#define HDMIPHY_MODE_SET_DONE (0x7c >> 2)
+
+/* HDMI PHY Values */
+#define HDMI_PHY_POWER_ON 0x80
+#define HDMI_PHY_POWER_OFF 0xff
+
+/* HDMI PHY Mode Set Values */
+#define HDMI_PHY_DISABLE_MODE_SET 0x80
+#define HDMI_PHY_ENABLE_MODE_SET 0x00
+
+/* PMU Registers for PHY */
+#define PMU_HDMI_PHY_CONTROL 0x700
+#define PMU_HDMI_PHY_ENABLE_BIT BIT(0)
+
#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 4537026bc385..5f32e1a29411 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -78,6 +78,7 @@
#define MXR_STATUS_BIG_ENDIAN (1 << 3)
#define MXR_STATUS_ENDIAN_MASK (1 << 3)
#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_IDLE (1 << 1)
#define MXR_STATUS_REG_RUN (1 << 0)
/* bits for MXR_CFG */
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 489ffd2c66e5..87885d8c06e8 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -148,7 +148,7 @@ static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
break;
case BIT(14):
/*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender)*/;
+ /*wait_for_all_fifos_empty(sender)*/
break;
case BIT(15):
dev_dbg(sender->dev->dev, "No Action required\n");
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index b686e56646eb..6e8fe9ec02b5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -112,11 +112,9 @@ static void psb_driver_lastclose(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_fbdev *fbdev = dev_priv->fbdev;
- drm_modeset_lock_all(dev);
- ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
- drm_modeset_unlock_all(dev);
return;
}
@@ -354,7 +352,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- drm_irq_install(dev);
+ drm_irq_install(dev, dev->pdev->irq);
dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -479,7 +477,7 @@ static struct drm_driver driver = {
.lastclose = psb_driver_lastclose,
.preclose = psb_driver_preclose,
- .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+ .num_ioctls = ARRAY_SIZE(psb_ioctls),
.device_is_agp = psb_driver_device_is_agp,
.irq_preinstall = psb_irq_preinstall,
.irq_postinstall = psb_irq_postinstall,
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 48af5cac1902..ac357b02bd35 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -568,11 +568,11 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
{
- uint8_t sum = 0;
+ int sum = 0;
while (bytes--)
- sum += *buf++;
- return (255 - sum) + 1;
+ sum -= *buf++;
+ return sum;
}
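The rewritten checksum keeps the HDMI infoframe invariant that the payload
bytes plus the checksum sum to 0 modulo 256; accumulating in an int and
letting the uint8_t return truncate makes that explicit. A small
self-checking example, not from the patch:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint8_t cksum(const uint8_t *buf, size_t bytes)
{
	int sum = 0;

	while (bytes--)
		sum -= *buf++;
	return sum;		/* two's complement of the byte sum */
}

int main(void)
{
	uint8_t b[] = { 0x10, 0x20 };

	/* cksum() yields 0xD0 here: 0x10 + 0x20 + 0xD0 == 0x100 == 0 mod 256 */
	assert((uint8_t)(b[0] + b[1] + cksum(b, sizeof(b))) == 0);
	return 0;
}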
#define HB(x) (x)
@@ -810,6 +810,12 @@ static int
tda998x_encoder_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
+ if (mode->clock > 150000)
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
return MODE_OK;
}
@@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
return i;
}
} else {
- for (i = 10; i > 0; i--) {
- msleep(10);
+ for (i = 100; i > 0; i--) {
+ msleep(1);
ret = reg_read(priv, REG_INT_FLAGS_2);
if (ret < 0)
return ret;
@@ -1183,7 +1189,6 @@ static void
tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- drm_i2c_encoder_destroy(encoder);
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
@@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
if (priv->cec)
i2c_unregister_device(priv->cec);
+ drm_i2c_encoder_destroy(encoder);
kfree(priv);
}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index aeace37415aa..e88bac1d781f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1251,7 +1251,7 @@ const struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
-int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
/**
* Determine if the device really is AGP or not.
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index bea2d67196fb..437e1824d0bf 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -5,6 +5,7 @@ config DRM_I915
depends on (AGP || AGP=n)
select INTEL_GTT
select AGP_INTEL if AGP
+ select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
@@ -71,7 +72,7 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
config DRM_I915_UMS
bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
- depends on DRM_I915
+ depends on DRM_I915 && BROKEN
default n
help
Choose this option if you still need userspace modesetting.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b1445b73465b..cad1683d8bb5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -18,6 +18,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_context.o \
+ i915_gem_render_state.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
@@ -26,12 +27,18 @@ i915-y += i915_cmd_parser.o \
i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_userptr.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_ringbuffer.o \
intel_uncore.o
+# autogenerated null render state
+i915-y += intel_renderstate_gen6.o \
+ intel_renderstate_gen7.o \
+ intel_renderstate_gen8.o
+
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
@@ -55,6 +62,7 @@ i915-y += dvo_ch7017.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
+ intel_dsi_panel_vbt.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a0f5bdd69491..80449f475960 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -160,7 +160,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0f1865d7d4d8..0f2587ff347c 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -195,7 +195,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
- };
+ }
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 8155ded79079..74f2af7c2d3e 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -121,7 +121,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!ns->quiet) {
DRM_DEBUG_KMS
@@ -233,9 +233,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
- ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
- __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
- mode->vtotal);
+ ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
/*
* Currently, these are all the modes I have data from.
@@ -261,9 +260,8 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
DRM_DEBUG_KMS
- ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
- __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
- mode->vtotal);
+ ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
/*
* Where do I find the native resolution for which scaling is not required???
@@ -277,8 +275,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
- DRM_DEBUG_KMS("%s: switching to 800x600\n",
- __FUNCTION__);
+ DRM_DEBUG_KMS("switching to 800x600\n");
/*
* No, I do not know where this data comes from.
@@ -341,8 +338,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
/* mode 274 */
- DRM_DEBUG_KMS("%s: switching to 640x480\n",
- __FUNCTION__);
+ DRM_DEBUG_KMS("switching to 640x480\n");
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
@@ -406,8 +402,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
/* mode 280 */
- DRM_DEBUG_KMS("%s: switching to 1024x768\n",
- __FUNCTION__);
+ DRM_DEBUG_KMS("switching to 1024x768\n");
/*
* This might or might not work, actually. I'm silently
* assuming here that the native panel resolution is
@@ -458,8 +453,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
- DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
- __FUNCTION__, enable);
+ DRM_DEBUG_KMS("Trying to set the dpms of the DVO to %i\n", enable);
ch = ns->reg_8_shadow;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 7b3e9e936200..fa0114967076 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -93,7 +93,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 12ea4b164692..7853719a0e81 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -118,7 +118,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
- };
+ }
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 4cf6d020d513..9d7954366bd2 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -28,7 +28,7 @@
#include "i915_drv.h"
/**
- * DOC: i915 batch buffer command parser
+ * DOC: batch buffer command parser
*
* Motivation:
* Certain OpenGL features (e.g. transform feedback, performance monitoring)
@@ -86,6 +86,367 @@
* general bitmasking mechanism.
*/
+#define STD_MI_OPCODE_MASK 0xFF800000
+#define STD_3D_OPCODE_MASK 0xFFFF0000
+#define STD_2D_OPCODE_MASK 0xFFC00000
+#define STD_MFX_OPCODE_MASK 0xFFFF0000
+
+#define CMD(op, opm, f, lm, fl, ...) \
+ { \
+ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
+ .cmd = { (op), (opm) }, \
+ .length = { (lm) }, \
+ __VA_ARGS__ \
+ }
+
+/* Convenience macros to compress the tables */
+#define SMI STD_MI_OPCODE_MASK
+#define S3D STD_3D_OPCODE_MASK
+#define S2D STD_2D_OPCODE_MASK
+#define SMFX STD_MFX_OPCODE_MASK
+#define F true
+#define S CMD_DESC_SKIP
+#define R CMD_DESC_REJECT
+#define W CMD_DESC_REGISTER
+#define B CMD_DESC_BITMASK
+#define M CMD_DESC_MASTER
+
+/* Command Mask Fixed Len Action
+ ---------------------------------------------------------- */
+static const struct drm_i915_cmd_descriptor common_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
+ CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
+ .reg = { .offset = 1, .mask = 0x007FFFFC },
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
+ .reg = { .offset = 1, .mask = 0x007FFFFC },
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor render_cmds[] = {
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_PREDICATE, SMI, F, 1, S ),
+ CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
+ CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0xFF, R ),
+ CMD( MI_CLFLUSH, SMI, !F, 0x3FF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_REPORT_PERF_COUNT, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 1,
+ .mask = MI_REPORT_PERF_COUNT_GGTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D, F, 1, S ),
+ CMD( PIPELINE_SELECT, S3D, F, 1, S ),
+ CMD( MEDIA_VFE_STATE, S3D, !F, 0xFFFF, B,
+ .bits = {{
+ .offset = 2,
+ .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
+ .expected = 0,
+ }}, ),
+ CMD( GPGPU_OBJECT, S3D, !F, 0xFF, S ),
+ CMD( GPGPU_WALKER, S3D, !F, 0xFF, S ),
+ CMD( GFX_OP_3DSTATE_SO_DECL_LIST, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_PIPE_CONTROL(5), S3D, !F, 0xFF, B,
+ .bits = {{
+ .offset = 1,
+ .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX),
+ .expected = 0,
+ .condition_offset = 1,
+ .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
+ }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
+ CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
+ CMD( MI_RS_CONTROL, SMI, F, 1, S ),
+ CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+ CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+ CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
+ CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
+ CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
+ CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
+ CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
+
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
+ CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor video_cmds[] = {
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_FLUSH_DW_NOTIFY,
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = MI_FLUSH_DW_USE_GTT,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ },
+ {
+ .offset = 0,
+ .mask = MI_FLUSH_DW_STORE_INDEX,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ }}, ),
+ CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ /*
+ * MFX_WAIT doesn't fit the way we handle length for most commands.
+ * It has a length field but it uses a non-standard length bias.
+ * It is always 1 dword though, so just treat it as fixed length.
+ */
+ CMD( MFX_WAIT, SMFX, F, 1, S ),
+};
+
+static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_FLUSH_DW_NOTIFY,
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = MI_FLUSH_DW_USE_GTT,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ },
+ {
+ .offset = 0,
+ .mask = MI_FLUSH_DW_STORE_INDEX,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ }}, ),
+ CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_GLOBAL_GTT,
+ .expected = 0,
+ }}, ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_FLUSH_DW_NOTIFY,
+ .expected = 0,
+ },
+ {
+ .offset = 1,
+ .mask = MI_FLUSH_DW_USE_GTT,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ },
+ {
+ .offset = 0,
+ .mask = MI_FLUSH_DW_STORE_INDEX,
+ .expected = 0,
+ .condition_offset = 0,
+ .condition_mask = MI_FLUSH_DW_OP_MASK,
+ }}, ),
+ CMD( COLOR_BLT, S2D, !F, 0x3F, S ),
+ CMD( SRC_COPY_BLT, S2D, !F, 0x3F, S ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+};
+
+#undef CMD
+#undef SMI
+#undef S3D
+#undef S2D
+#undef SMFX
+#undef F
+#undef S
+#undef R
+#undef W
+#undef B
+#undef M
+
+static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { render_cmds, ARRAY_SIZE(render_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { render_cmds, ARRAY_SIZE(render_cmds) },
+ { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { video_cmds, ARRAY_SIZE(video_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { blt_cmds, ARRAY_SIZE(blt_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+ { common_cmds, ARRAY_SIZE(common_cmds) },
+ { blt_cmds, ARRAY_SIZE(blt_cmds) },
+ { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+};
+
+/*
+ * Register whitelists, sorted by increasing register offset.
+ *
+ * Some registers that userspace accesses are 64 bits. The register
+ * access commands only allow 32-bit accesses. Hence, we have to include
+ * entries for both halves of the 64-bit registers.
+ */
+
+/* Convenience macro for adding 64-bit registers */
+#define REG64(addr) (addr), (addr + sizeof(u32))
+
+static const u32 gen7_render_regs[] = {
+ REG64(HS_INVOCATION_COUNT),
+ REG64(DS_INVOCATION_COUNT),
+ REG64(IA_VERTICES_COUNT),
+ REG64(IA_PRIMITIVES_COUNT),
+ REG64(VS_INVOCATION_COUNT),
+ REG64(GS_INVOCATION_COUNT),
+ REG64(GS_PRIMITIVES_COUNT),
+ REG64(CL_INVOCATION_COUNT),
+ REG64(CL_PRIMITIVES_COUNT),
+ REG64(PS_INVOCATION_COUNT),
+ REG64(PS_DEPTH_COUNT),
+ OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ GEN7_3DPRIM_END_OFFSET,
+ GEN7_3DPRIM_START_VERTEX,
+ GEN7_3DPRIM_VERTEX_COUNT,
+ GEN7_3DPRIM_INSTANCE_COUNT,
+ GEN7_3DPRIM_START_INSTANCE,
+ GEN7_3DPRIM_BASE_VERTEX,
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
+ REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
+ REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
+ GEN7_SO_WRITE_OFFSET(0),
+ GEN7_SO_WRITE_OFFSET(1),
+ GEN7_SO_WRITE_OFFSET(2),
+ GEN7_SO_WRITE_OFFSET(3),
+};
+
+static const u32 gen7_blt_regs[] = {
+ BCS_SWCTRL,
+};
+
+static const u32 ivb_master_regs[] = {
+ FORCEWAKE_MT,
+ DERRMR,
+ GEN7_PIPE_DE_LOAD_SL(PIPE_A),
+ GEN7_PIPE_DE_LOAD_SL(PIPE_B),
+ GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+};
+
+static const u32 hsw_master_regs[] = {
+ FORCEWAKE_MT,
+ DERRMR,
+};
+
+#undef REG64
+
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -137,15 +498,18 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
-static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
{
int i;
+ bool ret = true;
- if (!ring->cmd_tables || ring->cmd_table_count == 0)
- return;
+ if (!cmd_tables || cmd_table_count == 0)
+ return true;
- for (i = 0; i < ring->cmd_table_count; i++) {
- const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
+ for (i = 0; i < cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &cmd_tables[i];
u32 previous = 0;
int j;
@@ -154,35 +518,107 @@ static void validate_cmds_sorted(struct intel_ring_buffer *ring)
&table->table[i];
u32 curr = desc->cmd.value & desc->cmd.mask;
- if (curr < previous)
+ if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
ring->id, i, j, curr, previous);
+ ret = false;
+ }
previous = curr;
}
}
+
+ return ret;
}
-static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
int i;
u32 previous = 0;
+ bool ret = true;
for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i];
- if (curr < previous)
+ if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
ring_id, i, curr, previous);
+ ret = false;
+ }
previous = curr;
}
+
+ return ret;
+}
+
+static bool validate_regs_sorted(struct intel_engine_cs *ring)
+{
+ return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
+ check_sorted(ring->id, ring->master_reg_table,
+ ring->master_reg_count);
+}
+
+struct cmd_node {
+ const struct drm_i915_cmd_descriptor *desc;
+ struct hlist_node node;
+};
+
+/*
+ * Different command ranges have different numbers of bits for the opcode. For
+ * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
+ * problem is that, for example, MI commands use bits 22:16 for other fields
+ * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
+ * we mask a command from a batch it could hash to the wrong bucket due to
+ * non-opcode bits being set. But if we don't include those bits, some 3D
+ * commands may hash to the same bucket due to not including opcode bits that
+ * make the command unique. For now, we will risk hashing to the same bucket.
+ *
+ * If we attempt to generate a perfect hash, we should be able to look at bits
+ * 31:29 of a command from a batch buffer and use the full mask for that
+ * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
+ */
+#define CMD_HASH_MASK STD_MI_OPCODE_MASK
+
+static int init_hash_table(struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_table *cmd_tables,
+ int cmd_table_count)
+{
+ int i, j;
+
+ hash_init(ring->cmd_hash);
+
+ for (i = 0; i < cmd_table_count; i++) {
+ const struct drm_i915_cmd_table *table = &cmd_tables[i];
+
+ for (j = 0; j < table->count; j++) {
+ const struct drm_i915_cmd_descriptor *desc =
+ &table->table[j];
+ struct cmd_node *desc_node =
+ kmalloc(sizeof(*desc_node), GFP_KERNEL);
+
+ if (!desc_node)
+ return -ENOMEM;
+
+ desc_node->desc = desc;
+ hash_add(ring->cmd_hash, &desc_node->node,
+ desc->cmd.value & CMD_HASH_MASK);
+ }
+ }
+
+ return 0;
}
-static void validate_regs_sorted(struct intel_ring_buffer *ring)
+static void fini_hash_table(struct intel_engine_cs *ring)
{
- check_sorted(ring->id, ring->reg_table, ring->reg_count);
- check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+ struct hlist_node *tmp;
+ struct cmd_node *desc_node;
+ int i;
+
+ hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+ hash_del(&desc_node->node);
+ kfree(desc_node);
+ }
}
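For context on the table these helpers manage: the kernel hashtable API
buckets entries by a masked key, and because CMD_HASH_MASK deliberately
drops some opcode bits a bucket can hold distinct commands, so lookups must
re-check the stored value. A generic sketch under assumed bucket sizing
(the real table's size is not shown here):

#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MY_HASH_BITS	7		/* 2^7 buckets; assumed */
#define MY_KEY_MASK	0xFF800000	/* masked like CMD_HASH_MASK */

static DEFINE_HASHTABLE(my_hash, MY_HASH_BITS);

struct my_node {
	u32 key;
	struct hlist_node node;
};

static int my_add(u32 key)
{
	struct my_node *n = kmalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return -ENOMEM;
	n->key = key;
	hash_add(my_hash, &n->node, key & MY_KEY_MASK);
	return 0;
}

static struct my_node *my_find(u32 key)
{
	struct my_node *n;

	hash_for_each_possible(my_hash, n, node, key & MY_KEY_MASK) {
		if (n->key == key)	/* bucket may mix keys; re-check */
			return n;
	}
	return NULL;
}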
/**
@@ -190,25 +626,74 @@ static void validate_regs_sorted(struct intel_ring_buffer *ring)
* @ring: the ringbuffer to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
- * struct intel_ring_buffer based on whether the platform requires software
+ * struct intel_engine_cs based on whether the platform requires software
* command parsing.
+ *
+ * Return: non-zero if initialization fails
*/
-void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
+ const struct drm_i915_cmd_table *cmd_tables;
+ int cmd_table_count;
+ int ret;
+
if (!IS_GEN7(ring->dev))
- return;
+ return 0;
switch (ring->id) {
case RCS:
+ if (IS_HASWELL(ring->dev)) {
+ cmd_tables = hsw_render_ring_cmds;
+ cmd_table_count =
+ ARRAY_SIZE(hsw_render_ring_cmds);
+ } else {
+ cmd_tables = gen7_render_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ }
+
+ ring->reg_table = gen7_render_regs;
+ ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+
+ if (IS_HASWELL(ring->dev)) {
+ ring->master_reg_table = hsw_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ } else {
+ ring->master_reg_table = ivb_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ }
+
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
+ cmd_tables = gen7_video_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
+ if (IS_HASWELL(ring->dev)) {
+ cmd_tables = hsw_blt_ring_cmds;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ } else {
+ cmd_tables = gen7_blt_cmds;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ }
+
+ ring->reg_table = gen7_blt_regs;
+ ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+
+ if (IS_HASWELL(ring->dev)) {
+ ring->master_reg_table = hsw_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ } else {
+ ring->master_reg_table = ivb_master_regs;
+ ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ }
+
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
+ cmd_tables = hsw_vebox_cmds;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -218,18 +703,45 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
BUG();
}
- validate_cmds_sorted(ring);
- validate_regs_sorted(ring);
+ BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
+ BUG_ON(!validate_regs_sorted(ring));
+
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
+ }
+
+ ring->needs_cmd_parser = true;
+
+ return 0;
+}
+
+/**
+ * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * @ring: the ringbuffer to clean up
+ *
+ * Releases any resources related to command parsing that may have been
+ * initialized for the specified ring.
+ */
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+{
+ if (!ring->needs_cmd_parser)
+ return;
+
+ fini_hash_table(ring);
}
static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(const struct drm_i915_cmd_table *table,
+find_cmd_in_table(struct intel_engine_cs *ring,
u32 cmd_header)
{
- int i;
+ struct cmd_node *desc_node;
- for (i = 0; i < table->count; i++) {
- const struct drm_i915_cmd_descriptor *desc = &table->table[i];
+ hash_for_each_possible(ring->cmd_hash, desc_node, node,
+ cmd_header & CMD_HASH_MASK) {
+ const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
u32 masked_value = desc->cmd.value & desc->cmd.mask;
@@ -249,20 +761,16 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table,
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_ring_buffer *ring,
+find_cmd(struct intel_engine_cs *ring,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
+ const struct drm_i915_cmd_descriptor *desc;
u32 mask;
- int i;
- for (i = 0; i < ring->cmd_table_count; i++) {
- const struct drm_i915_cmd_descriptor *desc;
-
- desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
- if (desc)
- return desc;
- }
+ desc = find_cmd_in_table(ring, cmd_header);
+ if (desc)
+ return desc;
mask = ring->get_cmd_length_mask(cmd_header);
if (!mask)
@@ -329,15 +837,112 @@ finish:
*
* Return: true if the ring requires software command parsing
*/
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
- /* No command tables indicates a platform without parsing */
- if (!ring->cmd_tables)
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!ring->needs_cmd_parser)
+ return false;
+
+ /*
+ * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
+ * disabled. That will cause all of the parser's PPGTT checks to
+ * fail. For now, disable parsing when PPGTT is off.
+ */
+ if (!dev_priv->mm.aliasing_ppgtt)
return false;
return (i915.enable_cmd_parser == 1);
}
+static bool check_cmd(const struct intel_engine_cs *ring,
+ const struct drm_i915_cmd_descriptor *desc,
+ const u32 *cmd,
+ const bool is_master,
+ bool *oacontrol_set)
+{
+ if (desc->flags & CMD_DESC_REJECT) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ return false;
+ }
+
+ if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+ DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+ *cmd);
+ return false;
+ }
+
+ if (desc->flags & CMD_DESC_REGISTER) {
+ u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+ /*
+ * OACONTROL requires some special handling for writes. We
+ * want to make sure that any batch which enables OA also
+ * disables it before the end of the batch. The goal is to
+ * prevent one process from snooping on the perf data from
+ * another process. To do that, we need to check the value
+ * that will be written to the register. Hence, limit
+ * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+ */
+ if (reg_addr == OACONTROL) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+ return false;
+
+ if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+ *oacontrol_set = (cmd[2] != 0);
+ }
+
+ if (!valid_reg(ring->reg_table,
+ ring->reg_count, reg_addr)) {
+ if (!is_master ||
+ !valid_reg(ring->master_reg_table,
+ ring->master_reg_count,
+ reg_addr)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+ reg_addr,
+ *cmd,
+ ring->id);
+ return false;
+ }
+ }
+ }
+
+ if (desc->flags & CMD_DESC_BITMASK) {
+ int i;
+
+ for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+ u32 dword;
+
+ if (desc->bits[i].mask == 0)
+ break;
+
+ if (desc->bits[i].condition_mask != 0) {
+ u32 offset =
+ desc->bits[i].condition_offset;
+ u32 condition = cmd[offset] &
+ desc->bits[i].condition_mask;
+
+ if (condition == 0)
+ continue;
+ }
+
+ dword = cmd[desc->bits[i].offset] &
+ desc->bits[i].mask;
+
+ if (dword != desc->bits[i].expected) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, ring->id);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
#define LENGTH_BIAS 2
/**
@@ -352,7 +957,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
*
* Return: non-zero if the parser finds violations or otherwise fails
*/
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master)
@@ -361,6 +966,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
int needs_clflush = 0;
+ bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
if (ret) {
@@ -402,76 +1008,27 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
if ((batch_end - cmd) < length) {
- DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
+ DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
*cmd,
length,
- (unsigned long)(batch_end - cmd));
+ batch_end - cmd);
ret = -EINVAL;
break;
}
- if (desc->flags & CMD_DESC_REJECT) {
- DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
ret = -EINVAL;
break;
}
- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
- *cmd);
- ret = -EINVAL;
- break;
- }
-
- if (desc->flags & CMD_DESC_REGISTER) {
- u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
- if (!valid_reg(ring->reg_table,
- ring->reg_count, reg_addr)) {
- if (!is_master ||
- !valid_reg(ring->master_reg_table,
- ring->master_reg_count,
- reg_addr)) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
- reg_addr,
- *cmd,
- ring->id);
- ret = -EINVAL;
- break;
- }
- }
- }
-
- if (desc->flags & CMD_DESC_BITMASK) {
- int i;
-
- for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
- u32 dword;
-
- if (desc->bits[i].mask == 0)
- break;
-
- dword = cmd[desc->bits[i].offset] &
- desc->bits[i].mask;
-
- if (dword != desc->bits[i].expected) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
- *cmd,
- desc->bits[i].mask,
- desc->bits[i].expected,
- dword, ring->id);
- ret = -EINVAL;
- break;
- }
- }
-
- if (ret)
- break;
- }
-
cmd += length;
}
+ if (oacontrol_set) {
+ DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+ ret = -EINVAL;
+ }
+
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
@@ -483,3 +1040,22 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
return ret;
}
+
+/**
+ * i915_cmd_parser_get_version() - get the cmd parser version number
+ *
+ * The cmd parser maintains a simple increasing integer version number suitable
+ * for passing to userspace clients to determine what operations are permitted.
+ *
+ * Return: the current version number of the cmd parser
+ */
+int i915_cmd_parser_get_version(void)
+{
+ /*
+ * Command parser version history
+ *
+ * 1. Initial version. Checks batches and reports violations, but leaves
+ * hardware parsing enabled (so does not allow new use cases).
+ */
+ return 1;
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 195fe5bc0aac..b8c689202c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,7 +79,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
const struct intel_device_info *info = INTEL_INFO(dev);
@@ -172,7 +172,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", obj->ring->name);
}
-static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
seq_putc(m, ctx->is_initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
@@ -181,7 +181,7 @@ static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
@@ -239,7 +239,7 @@ static int obj_rank_by_stolen(void *priv,
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@@ -371,7 +371,7 @@ static int per_file_stats(int id, void *ptr, void *data)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count, purgeable_count;
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
memset(&stats, 0, sizeof(stats));
stats.file_priv = file->driver_priv;
+ spin_lock(&file->table_lock);
idr_for_each(&file->object_idr, per_file_stats, &stats);
+ spin_unlock(&file->table_lock);
/*
* Although we have a valid reference on file->pid, that does
* not guarantee that the task_struct who called get_pid() is
@@ -474,7 +476,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -509,12 +511,12 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
@@ -559,10 +561,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
static int i915_gem_request_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
struct drm_i915_gem_request *gem_request;
int ret, count, i;
@@ -594,7 +596,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}
static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %u\n",
@@ -604,10 +606,10 @@ static void i915_ring_seqno_info(struct seq_file *m,
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -627,10 +629,10 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
static int i915_interrupt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -638,7 +640,47 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
return ret;
intel_runtime_pm_get(dev_priv);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (IS_CHERRYVIEW(dev)) {
+ int i;
+ seq_printf(m, "Master Interrupt Control:\t%08x\n",
+ I915_READ(GEN8_MASTER_IRQ));
+
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ for (i = 0; i < 4; i++) {
+ seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IMR(i)));
+ seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IIR(i)));
+ seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+ i, I915_READ(GEN8_GT_IER(i)));
+ }
+
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
+ } else if (INTEL_INFO(dev)->gen >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -768,7 +810,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@@ -797,10 +839,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static int i915_hws_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
const u32 *hws;
int i;
@@ -945,7 +987,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 crstanddelay;
@@ -966,9 +1008,9 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
return 0;
}
-static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+static int i915_frequency_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@@ -991,6 +1033,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
@@ -1011,6 +1054,10 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
reqf >>= 25;
reqf *= GT_FREQUENCY_MULTIPLIER;
+ rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
+
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -1027,14 +1074,23 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
+ I915_READ(GEN6_PMIER),
+ I915_READ(GEN6_PMIMR),
+ I915_READ(GEN6_PMISR),
+ I915_READ(GEN6_PMIIR),
+ I915_READ(GEN6_PMINTRMSK));
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+ seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
@@ -1094,7 +1150,7 @@ out:
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 delayfreq;
@@ -1125,7 +1181,7 @@ static inline int MAP_TO_MV(int map)
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 inttoext;
@@ -1149,7 +1205,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl, rstdbyctl;
@@ -1219,15 +1275,19 @@ static int ironlake_drpc_info(struct seq_file *m)
static int vlv_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1;
unsigned fw_rendercount = 0, fw_mediacount = 0;
+ intel_runtime_pm_get(dev_priv);
+
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "Turbo enabled: %s\n",
@@ -1247,6 +1307,11 @@ static int vlv_drpc_info(struct seq_file *m)
(I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Render RC6 residency since boot: %u\n",
+ I915_READ(VLV_GT_RENDER_RC6));
+ seq_printf(m, "Media RC6 residency since boot: %u\n",
+ I915_READ(VLV_GT_MEDIA_RC6));
+
spin_lock_irq(&dev_priv->uncore.lock);
fw_rendercount = dev_priv->uncore.fw_rendercount;
fw_mediacount = dev_priv->uncore.fw_mediacount;
@@ -1263,7 +1328,7 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
@@ -1362,7 +1427,7 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
if (IS_VALLEYVIEW(dev))
@@ -1375,7 +1440,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1437,7 +1502,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
static int i915_ips_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1460,7 +1525,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool sr_enabled = false;
@@ -1486,7 +1551,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
@@ -1514,7 +1579,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@@ -1557,7 +1622,7 @@ out:
static int i915_gfxec(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1577,7 +1642,7 @@ static int i915_gfxec(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1605,7 +1670,7 @@ out:
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
@@ -1651,11 +1716,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct i915_hw_context *ctx;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1675,6 +1740,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ if (ctx->obj == NULL)
+ continue;
+
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
for_each_ring(ring, dev_priv, i)
@@ -1692,7 +1760,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
@@ -1740,7 +1808,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1788,10 +1856,14 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int per_file_ctx(int id, void *ptr, void *data)
{
- struct i915_hw_context *ctx = ptr;
+ struct intel_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+ if (i915_gem_context_is_default(ctx))
+ seq_puts(m, " default context:\n");
+ else
+ seq_printf(m, " context %d:\n", ctx->id);
ppgtt->debug_dump(ppgtt, m);
return 0;
@@ -1800,7 +1872,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int unused, i;
@@ -1816,8 +1888,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
u64 pdp = I915_READ(ring->mmio_base + offset + 4);
pdp <<= 32;
pdp |= I915_READ(ring->mmio_base + offset);
- for (i = 0; i < 4; i++)
- seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+ seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
}
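/*
 * The PDP dump above assembles one 64-bit page-directory pointer from two
 * 32-bit MMIO reads: the high dword (offset + 4) is read first, shifted up,
 * then OR'ed with the low dword. A minimal sketch of the idiom; REG_HI and
 * REG_LO are hypothetical offsets, not names from this patch:
 *
 *	u64 val = I915_READ(REG_HI);
 *	val <<= 32;
 *	val |= I915_READ(REG_LO);
 */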
@@ -1825,7 +1896,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
struct drm_file *file;
int i;
@@ -1852,12 +1923,9 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_ppgtt *pvt_ppgtt;
- pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
seq_printf(m, "proc: %s\n",
get_pid_task(file->pid, PIDTYPE_PID)->comm);
- seq_puts(m, " default context:\n");
idr_for_each(&file_priv->context_idr, per_file_ctx, m);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1865,7 +1933,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1885,56 +1953,9 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_dpio_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
-
- if (!IS_VALLEYVIEW(dev)) {
- seq_puts(m, "unsupported\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
- if (ret)
- return ret;
-
- seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
-
- seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
- seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
-
- seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
- seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
-
- seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
- seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
-
- seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
- seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
-
- seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
-
- mutex_unlock(&dev_priv->dpio_lock);
-
- return 0;
-}
-
static int i915_llc(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2040,11 +2061,11 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
static int i915_pc8_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev)) {
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
@@ -2115,7 +2136,7 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -2170,7 +2191,7 @@ static void intel_encoder_info(struct seq_file *m,
struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_connector *intel_connector;
@@ -2178,12 +2199,12 @@ static void intel_encoder_info(struct seq_file *m,
encoder = &intel_encoder->base;
seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
- encoder->base.id, drm_get_encoder_name(encoder));
+ encoder->base.id, encoder->name);
for_each_connector_on_encoder(dev, encoder, intel_connector) {
struct drm_connector *connector = &intel_connector->base;
seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
struct drm_display_mode *mode = &crtc->mode;
@@ -2197,7 +2218,7 @@ static void intel_encoder_info(struct seq_file *m,
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
@@ -2254,7 +2275,7 @@ static void intel_connector_info(struct seq_file *m,
struct drm_display_mode *mode;
seq_printf(m, "connector %d: type %s, status: %s\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
seq_printf(m, "\tname: %s\n", connector->display_info.name);
@@ -2286,10 +2307,8 @@ static bool cursor_active(struct drm_device *dev, int pipe)
if (IS_845G(dev) || IS_I865G(dev))
state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
- state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
else
- state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+ state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
return state;
}
@@ -2299,10 +2318,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pos;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- pos = I915_READ(CURPOS_IVB(pipe));
- else
- pos = I915_READ(CURPOS(pipe));
+ pos = I915_READ(CURPOS(pipe));
*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
@@ -2317,7 +2333,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
@@ -2327,7 +2343,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_lock_all(dev);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
bool active;
int x, y;
@@ -2339,10 +2355,14 @@ static int i915_display_info(struct seq_file *m, void *unused)
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
- yesno(crtc->cursor_visible),
+ yesno(crtc->cursor_base),
x, y, crtc->cursor_addr,
yesno(active));
}
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
}
seq_printf(m, "\n");
@@ -2595,7 +2615,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (!encoder->base.crtc)
@@ -2631,7 +2651,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
break;
}
}
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return ret;
}
@@ -3106,7 +3126,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
{
struct drm_device *dev = m->private;
- int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int num_levels = ilk_wm_max_level(dev) + 1;
int level;
drm_modeset_lock_all(dev);
@@ -3189,7 +3209,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
uint16_t new[5] = { 0 };
- int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+ int num_levels = ilk_wm_max_level(dev) + 1;
int level;
int ret;
char tmp[32];
@@ -3286,9 +3306,15 @@ static int
i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val,
"Manually setting wedged to %llu", val);
+
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
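/*
 * i915_wedged_set() now holds a runtime PM reference around the error
 * injection: intel_runtime_pm_get() keeps the device awake for the register
 * access done by i915_handle_error(), and intel_runtime_pm_put() drops the
 * reference afterwards. The general shape of the pairing:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	(touch hardware here)
 *	intel_runtime_pm_put(dev_priv);
 */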
@@ -3774,7 +3800,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
- {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+ {"i915_frequency_info", i915_frequency_info, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
@@ -3790,7 +3816,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
- {"i915_dpio", i915_dpio_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_sink_crc_eDP1", i915_sink_crc, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index eedb023af27d..d44344140627 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
@@ -44,6 +46,7 @@
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/oom.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
@@ -63,7 +66,7 @@
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->obj == NULL) \
+ if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
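/*
 * The do { ... } while (0) wrapper above is the standard idiom for
 * multi-statement macros: it makes the macro expand to a single statement,
 * so it composes safely with an unbraced if/else. A generic sketch (the
 * macro name is hypothetical, not part of this patch):
 *
 *	#define CHECK_OR_RETURN(cond) do {	\
 *		if (!(cond))			\
 *			return -EINVAL;		\
 *	} while (0)
 */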
@@ -119,7 +122,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
@@ -139,7 +142,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
/*
* We should never lose context on the ring with modesetting
@@ -148,17 +152,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
- if (ring->space < 0)
- ring->space += ring->size;
+ ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
+ if (ringbuf->space < 0)
+ ringbuf->space += ringbuf->size;
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
- if (ring->head == ring->tail && master_priv->sarea_priv)
+ if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
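/*
 * The free-space computation in i915_kernel_lost_context() is standard
 * circular-buffer arithmetic: space = head - (tail + reserve), and a
 * negative result means the free region wraps past the end of the ring, so
 * the ring size is added back. A minimal sketch of the same calculation:
 *
 *	space = head - (tail + reserve);
 *	if (space < 0)
 *		space += size;		(free region wraps around)
 */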
@@ -201,7 +205,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (LP_RING(dev_priv)->obj != NULL) {
+ if (LP_RING(dev_priv)->buffer->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
@@ -234,11 +238,11 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (ring->virtual_start == NULL) {
+ if (ring->buffer->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@@ -360,7 +364,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
- if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
return -EINVAL;
for (i = 0; i < dwords;) {
@@ -782,7 +786,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -823,7 +827,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1017,6 +1021,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
value = 1;
break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version();
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -1070,7 +1077,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1277,12 +1284,13 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- bool can_switch;
- spin_lock(&dev->count_lock);
- can_switch = (dev->open_count == 0);
- spin_unlock(&dev->count_lock);
- return can_switch;
+ /*
+ * FIXME: open_count is protected by drm_global_mutex, but taking that here
+ * would lead to a locking inversion with the driver load path. And the
+ * access is completely racy anyway. So don't bother with locking for now.
+ */
+ return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
@@ -1326,7 +1334,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_gem_stolen;
@@ -1336,7 +1344,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev);
if (ret)
- goto cleanup_power;
+ goto cleanup_irq;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
@@ -1345,10 +1353,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = true;
- if (INTEL_INFO(dev)->num_pipes == 0) {
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+ if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
- }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1382,9 +1388,7 @@ cleanup_gem:
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_power:
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@@ -1447,6 +1451,39 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
}
#endif
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ DRM_INFO("Replacing VGA console driver\n");
+
+ console_lock();
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+ console_unlock();
+
+ return ret;
+}
+#endif
+
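/*
 * The i915_kick_out_vgacon() variants above form a compile-time dispatch:
 * with no VGA console configured there is nothing to kick out (return 0);
 * with a VGA console but no dummy console there is nothing to take over
 * with (return -ENODEV); otherwise the dummy console replaces vgacon under
 * console_lock. A sketch of the shape (config/function names hypothetical):
 *
 *	#if !defined(CONFIG_FEATURE)
 *	static int kick_out(void) { return 0; }
 *	#elif !defined(CONFIG_FALLBACK)
 *	static int kick_out(void) { return -ENODEV; }
 *	#else
 *	static int kick_out(void) { (do the takeover) }
 *	#endif
 */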
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
@@ -1620,8 +1657,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_regs;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_gtt;
+ }
+
i915_kick_out_firmware_fb(dev_priv);
+ }
pci_set_master(dev->pdev);
@@ -1739,8 +1783,8 @@ out_power_well:
intel_power_domains_remove(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.scan_objects)
- unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -1753,8 +1797,6 @@ out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
- list_del(&dev_priv->gtt.base.global_link);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_regs:
intel_uncore_fini(dev);
@@ -1791,8 +1833,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.scan_objects)
- unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1843,7 +1885,6 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_vblank_cleanup(dev);
@@ -1864,7 +1905,7 @@ int i915_driver_unload(struct drm_device *dev)
kmem_cache_destroy(dev_priv->slab);
pci_dev_put(dev_priv->bridge_dev);
- kfree(dev->dev_private);
+ kfree(dev_priv);
return 0;
}
@@ -1925,6 +1966,8 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ if (file_priv && file_priv->bsd_ring)
+ file_priv->bsd_ring = NULL;
kfree(file_priv);
}
@@ -1978,9 +2021,10 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
-int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
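/*
 * ARRAY_SIZE() is the kernel's element-count helper, essentially
 * sizeof(arr) / sizeof((arr)[0]) plus a build-time check that the argument
 * is a real array rather than a pointer; the DRM_ARRAY_SIZE() it replaces
 * was a plain alias for the same expression. Sketch:
 *
 *	static const int tbl[] = { 1, 2, 3 };
 *	BUILD_BUG_ON(ARRAY_SIZE(tbl) != 3);
 */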
/*
* This is really ugly: Because old userspace abused the linux agp interface to
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 82f4d1f47d3b..651e65e051c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,6 +36,7 @@
#include <linux/console.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;
@@ -49,12 +50,30 @@ static struct drm_driver driver;
.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+#define GEN_CHV_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ CHV_PIPE_C_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ CHV_TRANSCODER_C_OFFSET, }, \
+ .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
+ CHV_DPLL_C_OFFSET }, \
+ .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
+ CHV_DPLL_C_MD_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+ CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_845g_info = {
@@ -62,6 +81,7 @@ static const struct intel_device_info intel_845g_info = {
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i85x_info = {
@@ -71,6 +91,7 @@ static const struct intel_device_info intel_i85x_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
@@ -78,6 +99,7 @@ static const struct intel_device_info intel_i865g_info = {
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915g_info = {
@@ -85,6 +107,7 @@ static const struct intel_device_info intel_i915g_info = {
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -94,12 +117,14 @@ static const struct intel_device_info intel_i915gm_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -109,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965g_info = {
@@ -117,6 +143,7 @@ static const struct intel_device_info intel_i965g_info = {
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i965gm_info = {
@@ -126,6 +153,7 @@ static const struct intel_device_info intel_i965gm_info = {
.supports_tv = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g33_info = {
@@ -134,6 +162,7 @@ static const struct intel_device_info intel_g33_info = {
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g45_info = {
@@ -141,6 +170,7 @@ static const struct intel_device_info intel_g45_info = {
.has_pipe_cxsr = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
@@ -150,6 +180,7 @@ static const struct intel_device_info intel_gm45_info = {
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
@@ -157,6 +188,7 @@ static const struct intel_device_info intel_pineview_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_d_info = {
@@ -164,6 +196,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -172,6 +205,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -181,6 +215,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -190,6 +225,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
#define GEN7_FEATURES \
@@ -203,6 +239,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -210,6 +247,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.is_ivybridge = 1,
.is_mobile = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ivybridge_q_info = {
@@ -217,6 +255,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -228,6 +267,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -238,6 +278,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -247,6 +288,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -257,6 +299,7 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_d_info = {
@@ -267,6 +310,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_m_info = {
@@ -277,6 +321,40 @@ static const struct intel_device_info intel_broadwell_m_info = {
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+ .gen = 8, .is_mobile = 1, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+ .is_preliminary = 1,
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_CHV_PIPEOFFSETS,
+ CURSOR_OFFSETS,
};
/*
@@ -311,8 +389,11 @@ static const struct intel_device_info intel_broadwell_m_info = {
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
- INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
- INTEL_BDW_D_IDS(&intel_broadwell_d_info)
+ INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
+ INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
+ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
+ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
+ INTEL_CHV_IDS(&intel_cherryview_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -445,18 +526,20 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
-
drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
+
+ intel_disable_gt_powersave(dev);
+
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ drm_modeset_lock_all(dev);
+ for_each_crtc(dev, crtc) {
dev_priv->display.crtc_disable(crtc);
- mutex_unlock(&dev->mode_config.mutex);
+ }
+ drm_modeset_unlock_all(dev);
intel_modeset_suspend_hw(dev);
}
@@ -519,24 +602,6 @@ void intel_console_resume(struct work_struct *work)
console_unlock();
}
-static void intel_resume_hotplug(struct drm_device *dev)
-{
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
-
- mutex_lock(&mode_config->mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
- if (encoder->hot_plug)
- encoder->hot_plug(encoder);
-
- mutex_unlock(&mode_config->mutex);
-
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(dev);
-}
-
static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -551,7 +616,6 @@ static int i915_drm_thaw_early(struct drm_device *dev)
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int error = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
@@ -569,12 +633,14 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
drm_mode_config_reset(dev);
mutex_lock(&dev->struct_mutex);
-
- error = i915_gem_init_hw(dev);
+ if (i915_gem_init_hw(dev)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ }
mutex_unlock(&dev->struct_mutex);
/* We need working interrupts for modeset enabling ... */
- drm_irq_install(dev);
+ drm_irq_install(dev, dev->pdev->irq);
intel_modeset_init_hw(dev);
@@ -591,7 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
- intel_resume_hotplug(dev);
+ drm_helper_hpd_irq_event(dev);
}
intel_opregion_init(dev);
@@ -613,7 +679,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
mutex_unlock(&dev_priv->modeset_restore_lock);
intel_runtime_pm_put(dev_priv);
- return error;
+ return 0;
}
static int i915_drm_thaw(struct drm_device *dev)
@@ -746,18 +812,20 @@ int i915_reset(struct drm_device *dev)
return ret;
}
- drm_irq_uninstall(dev);
- drm_irq_install(dev);
+ /*
+ * FIXME: This races pretty badly against concurrent holders of
+ * ring interrupts. This is possible since we've started to drop
+ * dev->struct_mutex in select places when waiting for the gpu.
+ */
- /* rps/rc6 re-init is necessary to restore state lost after the
- * reset and the re-install of drm irq. Skip for ironlake per
+ /*
+ * rps/rc6 re-init is necessary to restore state lost after the
+ * reset and the re-install of gt irqs. Skip for ironlake per
* previous concerns that it doesn't respond well to some forms
- * of re-init after reset. */
- if (INTEL_INFO(dev)->gen > 5) {
- mutex_lock(&dev->struct_mutex);
- intel_enable_gt_powersave(dev);
- mutex_unlock(&dev->struct_mutex);
- }
+ * of re-init after reset.
+ */
+ if (INTEL_INFO(dev)->gen > 5)
+ intel_reset_gt_powersave(dev);
intel_hpd_init(dev);
} else {
@@ -891,21 +959,453 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
-static int i915_runtime_suspend(struct device *device)
+static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+ hsw_enable_pc8(dev_priv);
+
+ return 0;
+}
+
+static int snb_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ intel_init_pch_refclk(dev);
+
+ return 0;
+}
+
+static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ hsw_disable_pc8(dev_priv);
+
+ return 0;
+}
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ * used internally by the HW in a way that doesn't depend on
+ * keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ * These have no effect on the driver's operation, so we don't save/restore
+ * them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ * the resume path. For example, many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that, based on the above 3 criteria, can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ int i;
+
+ /* GAM 0x4000-0x4770 */
+ s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
+ s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
+ s->arb_mode = I915_READ(ARB_MODE);
+ s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
+ s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+
+ s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+
+ s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
+ s->ecochk = I915_READ(GAM_ECOCHK);
+ s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
+ s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
+
+ s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ s->g3dctl = I915_READ(VLV_G3DCTL);
+ s->gsckgctl = I915_READ(VLV_GSCKGCTL);
+ s->mbctl = I915_READ(GEN6_MBCTL);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
+ s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
+ s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
+ s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
+ s->rstctl = I915_READ(GEN6_RSTCTL);
+ s->misccpctl = I915_READ(GEN7_MISCCPCTL);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ s->gfxpause = I915_READ(GEN6_GFXPAUSE);
+ s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
+ s->rpdeuc = I915_READ(GEN6_RPDEUC);
+ s->ecobus = I915_READ(ECOBUS);
+ s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
+ s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
+ s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
+ s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
+ s->rcedata = I915_READ(VLV_RCEDATA);
+ s->spare2gh = I915_READ(VLV_SPAREG2H);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ s->gt_imr = I915_READ(GTIMR);
+ s->gt_ier = I915_READ(GTIER);
+ s->pm_imr = I915_READ(GEN6_PMIMR);
+ s->pm_ier = I915_READ(GEN6_PMIER);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ s->tilectl = I915_READ(TILECTL);
+ s->gt_fifoctl = I915_READ(GTFIFOCTL);
+ s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
+ s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ s->pmwgicz = I915_READ(VLV_PMWGICZ);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
+ s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
+ s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
+
+ /*
+ * Not saving any of:
+ * DFT, 0x9800-0x9EC0
+ * SARB, 0xB000-0xB1FC
+ * GAC, 0x5208-0x524C, 0x14000-0x14C000
+ * PCI CFG
+ */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ u32 val;
+ int i;
+
+ /* GAM 0x4000-0x4770 */
+ I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
+ I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+ I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
+ I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+ I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+
+ I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ I915_WRITE(GAM_ECOCHK, s->ecochk);
+ I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+ I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+ I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ I915_WRITE(VLV_G3DCTL, s->g3dctl);
+ I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
+ I915_WRITE(GEN6_MBCTL, s->mbctl);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
+ I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
+ I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
+ I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
+ I915_WRITE(GEN6_RSTCTL, s->rstctl);
+ I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
+ I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
+ I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
+ I915_WRITE(ECOBUS, s->ecobus);
+ I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+ I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
+ I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+ I915_WRITE(VLV_RCEDATA, s->rcedata);
+ I915_WRITE(VLV_SPAREG2H, s->spare2gh);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ I915_WRITE(GTIMR, s->gt_imr);
+ I915_WRITE(GTIER, s->gt_ier);
+ I915_WRITE(GEN6_PMIMR, s->pm_imr);
+ I915_WRITE(GEN6_PMIER, s->pm_ier);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ I915_WRITE(TILECTL, s->tilectl);
+ I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
+ /*
+ * Preserve the GT allow wake and GFX force clock bit; they are not
+ * to be restored, as they are used to control the s0ix suspend/resume
+ * sequence by the caller.
+ */
+ val = I915_READ(VLV_GTLC_WAKE_CTRL);
+ val &= VLV_GTLC_ALLOWWAKEREQ;
+ val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ val &= VLV_GFX_CLK_FORCE_ON_BIT;
+ val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+ I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
+ I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
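/*
 * The two restores just above use a read-modify-write that keeps the live,
 * caller-owned bits (ALLOWWAKEREQ, GFX_CLK_FORCE_ON) while restoring every
 * other bit from the saved copy. Generic sketch; REG and LIVE_BITS are
 * hypothetical names:
 *
 *	val = I915_READ(REG);
 *	val &= LIVE_BITS;		(keep bits the caller still controls)
 *	val |= saved & ~LIVE_BITS;	(restore the remainder)
 *	I915_WRITE(REG, val);
 */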
+
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
+{
+ u32 val;
+ int err;
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
+
+#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+ /* Wait for a previous force-off to settle */
+ if (force_on) {
+ err = wait_for(!COND, 20);
+ if (err) {
+ DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+ return err;
+ }
+ }
+
+ val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+ if (force_on)
+ val |= VLV_GFX_CLK_FORCE_ON_BIT;
+ I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+ if (!force_on)
+ return 0;
+
+ err = wait_for(COND, 20);
+ if (err)
+ DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
+ I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+
+ return err;
+#undef COND
+}
+
+static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
+{
+ u32 val;
+ int err = 0;
+
+ val = I915_READ(VLV_GTLC_WAKE_CTRL);
+ val &= ~VLV_GTLC_ALLOWWAKEREQ;
+ if (allow)
+ val |= VLV_GTLC_ALLOWWAKEREQ;
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+ POSTING_READ(VLV_GTLC_WAKE_CTRL);
+
+#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
+ allow)
+ err = wait_for(COND, 1);
+ if (err)
+ DRM_ERROR("timeout disabling GT waking\n");
+ return err;
+#undef COND
+}
+
+static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
+{
+ u32 mask;
+ u32 val;
+ int err;
+
+ mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+ val = wait_for_on ? mask : 0;
+#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
+ if (COND)
+ return 0;
+
+ DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
+ wait_for_on ? "on" : "off",
+ I915_READ(VLV_GTLC_PW_STATUS));
+
+ /*
+ * RC6 transitioning can be delayed up to 2 msec (see
+ * valleyview_enable_rps), use 3 msec for safety.
+ */
+ err = wait_for(COND, 3);
+ if (err)
+ DRM_ERROR("timeout waiting for GT wells to go %s\n",
+ wait_for_on ? "on" : "off");
+
+ return err;
+#undef COND
+}
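+
+/*
+ * Note on the polling pattern above (assuming the wait_for() helper from
+ * intel_drv.h): wait_for(COND, ms) polls COND until it becomes true or the
+ * ms timeout expires, returning 0 on success and -ETIMEDOUT otherwise. The
+ * local #define/#undef COND pairs keep each poll condition next to its
+ * single user.
+ */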
+
+static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
+{
+ if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+ return;
+
+ DRM_ERROR("GT register access while GT waking disabled\n");
+ I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+ int err;
+
+ /*
+ * Bspec defines the GT well on/off state flags as debug only, so
+ * don't treat a timeout waiting for them as a hard failure.
+ */
+ (void)vlv_wait_for_gt_wells(dev_priv, false);
+
+ mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+ WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+ vlv_check_no_gt_access(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, true);
+ if (err)
+ goto err1;
+
+ err = vlv_allow_gt_wake(dev_priv, false);
+ if (err)
+ goto err2;
+ vlv_save_gunit_s0ix_state(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ /* For safety always re-enable waking and disable gfx clock forcing */
+ vlv_allow_gt_wake(dev_priv, true);
+err1:
+ vlv_force_gfx_clock(dev_priv, false);
+
+ return err;
+}
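+
+/*
+ * Ordering sketch for the suspend path above: force the GFX clock on so the
+ * Gunit registers stay accessible, disallow GT wake so the state cannot
+ * change underneath us, save the state, then drop the clock force. The
+ * err1/err2 labels unwind those steps in reverse on failure.
+ */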
+
+static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int err;
+ int ret;
+
+ /*
+ * If any of the steps fail, just try to continue; that's the best we
+ * can do at this point. Return the first error code, which will also
+ * leave RPM permanently disabled.
+ */
+ ret = vlv_force_gfx_clock(dev_priv, true);
+
+ vlv_restore_gunit_s0ix_state(dev_priv);
+
+ err = vlv_allow_gt_wake(dev_priv, true);
+ if (!ret)
+ ret = err;
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (!ret)
+ ret = err;
+
+ vlv_check_no_gt_access(dev_priv);
+
+ intel_init_clock_gating(dev);
+ i915_gem_restore_fences(dev);
+
+ return ret;
+}
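+
+/*
+ * Error policy sketch for the resume path above: every step runs even if an
+ * earlier one failed, and only the first error is reported:
+ *
+ * err = step(...);
+ * if (!ret)
+ * ret = err;
+ *
+ * which gives the device the best chance of coming back up.
+ */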
+
+static int intel_runtime_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+ return -ENODEV;
WARN_ON(!HAS_RUNTIME_PM(dev));
assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n");
- if (HAS_PC8(dev))
- hsw_enable_pc8(dev_priv);
+ /*
+ * We could deadlock here if another thread holding struct_mutex were
+ * to call RPM suspend concurrently, since that suspend would first
+ * wait for this one to finish. In that case the concurrent RPM resume
+ * would be followed by its RPM suspend counterpart. Still, for
+ * consistency, return -EAGAIN, which reschedules this suspend.
+ */
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
+ /*
+ * Bump the expiration timestamp, otherwise the suspend won't
+ * be rescheduled.
+ */
+ pm_runtime_mark_last_busy(device);
+ return -EAGAIN;
+ }
+ /*
+ * We are safe here against re-faults, since the fault handler takes
+ * an RPM reference.
+ */
i915_gem_release_all_mmaps(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * rps.work can't be rearmed here, since we get here only after making
+ * sure the GPU is idle and the RPS freq is set to the minimum. See
+ * intel_mark_idle().
+ */
+ cancel_work_sync(&dev_priv->rps.work);
+ intel_runtime_pm_disable_interrupts(dev);
+
+ if (IS_GEN6(dev)) {
+ ret = 0;
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = hsw_runtime_suspend(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
+ ret = vlv_runtime_suspend(dev_priv);
+ } else {
+ ret = -ENODEV;
+ WARN_ON(1);
+ }
+
+ if (ret) {
+ DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_runtime_pm_restore_interrupts(dev);
+
+ return ret;
+ }
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
dev_priv->pm.suspended = true;
@@ -923,11 +1423,12 @@ static int i915_runtime_suspend(struct device *device)
return 0;
}
-static int i915_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
WARN_ON(!HAS_RUNTIME_PM(dev));
@@ -936,11 +1437,33 @@ static int i915_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
- if (HAS_PC8(dev))
- hsw_disable_pc8(dev_priv);
+ if (IS_GEN6(dev)) {
+ ret = snb_runtime_resume(dev_priv);
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = hsw_runtime_resume(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
+ ret = vlv_runtime_resume(dev_priv);
+ } else {
+ WARN_ON(1);
+ ret = -ENODEV;
+ }
- DRM_DEBUG_KMS("Device resumed\n");
- return 0;
+ /*
+ * No point in rolling things back in case of an error, as the best
+ * we can do is hope that things will still work (and disable RPM).
+ */
+ i915_gem_init_swizzling(dev);
+ gen6_update_ring_freq(dev);
+
+ intel_runtime_pm_restore_interrupts(dev);
+ intel_reset_gt_powersave(dev);
+
+ if (ret)
+ DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+ else
+ DRM_DEBUG_KMS("Device resumed\n");
+
+ return ret;
}
static const struct dev_pm_ops i915_pm_ops = {
@@ -954,8 +1477,8 @@ static const struct dev_pm_ops i915_pm_ops = {
.poweroff = i915_pm_poweroff,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
- .runtime_suspend = i915_runtime_suspend,
- .runtime_resume = i915_runtime_resume,
+ .runtime_suspend = intel_runtime_suspend,
+ .runtime_resume = intel_runtime_resume,
};
static const struct vm_operations_struct i915_gem_vm_ops = {
@@ -1062,6 +1585,7 @@ static int __init i915_init(void)
driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
/* Silently fail loading to not upset userspace. */
+ DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
return 0;
#endif
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 388c028e223c..374f964323ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,11 +35,13 @@
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
+#include "i915_gem_gtt.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
+#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
@@ -91,7 +93,7 @@ enum port {
};
#define port_name(p) ((p) + 'A')
-#define I915_NUM_PHYS_VLV 1
+#define I915_NUM_PHYS_VLV 2
enum dpio_channel {
DPIO_CH0,
@@ -162,6 +164,12 @@ enum hpd_pin {
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -171,6 +179,7 @@ enum hpd_pin {
if ((intel_connector)->base.encoder == (__encoder))
struct drm_i915_private;
+struct i915_mmu_object;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
@@ -312,7 +321,6 @@ struct drm_i915_error_state {
u32 gab_ctl;
u32 gfx_mode;
u32 extra_instdone[I915_NUM_INSTDONE_REG];
- u32 pipestat[I915_MAX_PIPES];
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
@@ -346,7 +354,7 @@ struct drm_i915_error_state {
u64 bbaddr;
u64 acthd;
u32 fault_reg;
- u32 faddr;
+ u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_RINGS - 1];
@@ -385,6 +393,7 @@ struct drm_i915_error_state {
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
+ u32 userptr:1;
s32 ring:4;
u32 cache_level:3;
} **active_bo, **pinned_bo;
@@ -449,10 +458,11 @@ struct drm_i915_display_funcs {
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags);
- int (*update_primary_plane)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y);
+ void (*update_primary_plane)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
@@ -545,6 +555,7 @@ struct intel_device_info {
int dpll_offsets[I915_MAX_PIPES];
int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
};
#undef DEFINE_FLAG
@@ -560,168 +571,6 @@ enum i915_cache_level {
I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
-typedef uint32_t gen6_gtt_pte_t;
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
-
- /** This object's place on the active/inactive lists */
- struct list_head mm_list;
-
- struct list_head vma_link; /* Link in the object's VMA list */
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, pin_ioctl
- * (via user_pin_count), execbuffer (objects are not allowed multiple
- * times for the same batchbuffer), and the framebuffer code. When
- * switching/pageflipping, the framebuffer code has at most two buffers
- * pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
- /** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
- void (*unbind_vma)(struct i915_vma *vma);
- /* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
- void (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
-};
-
-struct i915_address_space {
- struct drm_mm mm;
- struct drm_device *dev;
- struct list_head global_link;
- unsigned long start; /* Start offset always 0 for dri2 */
- size_t total; /* size addr space maps (ex. 2GB for ggtt) */
-
- struct {
- dma_addr_t addr;
- struct page *page;
- } scratch;
-
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /* FIXME: Need a more generic return type */
- gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid); /* Create a valid PTE */
- void (*clear_range)(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch);
- void (*insert_entries)(struct i915_address_space *vm,
- struct sg_table *st,
- uint64_t start,
- enum i915_cache_level cache_level);
- void (*cleanup)(struct i915_address_space *vm);
-};
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_gtt {
- struct i915_address_space base;
- size_t stolen_size; /* Total size of stolen memory */
-
- unsigned long mappable_end; /* End offset that we can CPU map */
- struct io_mapping *mappable; /* Mapping to our CPU mappable region */
- phys_addr_t mappable_base; /* PA of our GMADR */
-
- /** "Graphics Stolen Memory" holds the global PTEs */
- void __iomem *gsm;
-
- bool do_idle_maps;
-
- int mtrr;
-
- /* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
- size_t *stolen, phys_addr_t *mappable_base,
- unsigned long *mappable_end);
-};
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
-#define GEN8_LEGACY_PDPS 4
-struct i915_hw_ppgtt {
- struct i915_address_space base;
- struct kref ref;
- struct drm_mm_node node;
- unsigned num_pd_entries;
- unsigned num_pd_pages; /* gen8+ */
- union {
- struct page **pt_pages;
- struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
- };
- struct page *pd_pages;
- union {
- uint32_t pd_offset;
- dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
- };
- union {
- dma_addr_t *pt_dma_addr;
- dma_addr_t *gen8_pt_dma_addr[4];
- };
-
- struct i915_hw_context *ctx;
-
- int (*enable)(struct i915_hw_ppgtt *ppgtt);
- int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
- bool synchronous);
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
-};
-
struct i915_ctx_hang_stats {
/* This context had batch pending when hang was declared */
unsigned batch_pending;
@@ -738,13 +587,13 @@ struct i915_ctx_hang_stats {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
-struct i915_hw_context {
+struct intel_context {
struct kref ref;
int id;
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_ring_buffer *last_ring;
+ struct intel_engine_cs *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
@@ -782,6 +631,10 @@ struct i915_fbc {
} no_fbc_reason;
};
+struct i915_drrs {
+ struct intel_connector *connector;
+};
+
struct i915_psr {
bool sink_support;
bool source_ok;
@@ -803,6 +656,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -965,6 +819,67 @@ struct i915_suspend_saved_registers {
u32 savePCH_PORT_HOTPLUG;
};
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 clock_gate_dis2;
+};
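+
+/*
+ * Illustrative note: each field above is paired with exactly one register
+ * access in vlv_save_gunit_s0ix_state()/vlv_restore_gunit_s0ix_state()
+ * (e.g. s->tilectl <-> TILECTL), grouped by power domain so the pairing is
+ * easy to audit.
+ */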
+
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -1063,6 +978,8 @@ struct i915_power_well {
bool always_on;
/* power well enable/disable usage count */
int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
unsigned long domains;
unsigned long data;
const struct i915_power_well_ops *ops;
@@ -1074,6 +991,7 @@ struct i915_power_domains {
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
+ bool initializing;
int power_well_count;
struct mutex lock;
@@ -1132,7 +1050,8 @@ struct i915_gem_mm {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
- struct shrinker inactive_shrinker;
+ struct notifier_block oom_notifier;
+ struct shrinker shrinker;
bool shrinker_no_lock_stealing;
/** LRU list of objects with fence regs on them. */
@@ -1170,6 +1089,9 @@ struct i915_gem_mm {
*/
bool busy;
+ /* the indicator for dispatch video commands on two BSD rings */
+ int bsd_ring_dispatch_index;
+
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -1245,8 +1167,12 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
- /* For gpu hang simulation. */
- unsigned int stop_rings;
+ /* Userspace knobs for gpu hang simulation;
+ * combines a ring mask with extra flags
+ */
+ u32 stop_rings;
+#define I915_STOP_RING_ALLOW_BAN (1 << 31)
+#define I915_STOP_RING_ALLOW_WARN (1 << 30)
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
@@ -1266,6 +1192,12 @@ struct ddi_vbt_port_info {
uint8_t supports_dp:1;
};
+enum drrs_support_type {
+ DRRS_NOT_SUPPORTED = 0,
+ STATIC_DRRS_SUPPORT = 1,
+ SEAMLESS_DRRS_SUPPORT = 2
+};
+
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1278,9 +1210,12 @@ struct intel_vbt_data {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
+ unsigned int has_mipi:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ enum drrs_support_type drrs_type;
+
/* eDP */
int edp_rate;
int edp_lanes;
@@ -1299,7 +1234,14 @@ struct intel_vbt_data {
/* MIPI DSI */
struct {
+ u16 port;
u16 panel_id;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u8 seq_version;
+ u32 size;
+ u8 *data;
+ u8 *sequence[MIPI_SEQ_MAX];
} dsi;
int crt_ddc_pin;
@@ -1351,23 +1293,13 @@ struct ilk_wm_values {
* goes back to false exactly before we reenable the IRQs. We use this variable
* to check if someone is trying to enable/disable IRQs while they're supposed
* to be disabled. This shouldn't happen and we'll print some error messages in
- * case it happens, but if it actually happens we'll also update the variables
- * inside struct regsave so when we restore the IRQs they will contain the
- * latest expected values.
+ * case it happens.
*
* For more, read the Documentation/power/runtime_pm.txt.
*/
struct i915_runtime_pm {
bool suspended;
bool irqs_disabled;
-
- struct {
- uint32_t deimr;
- uint32_t sdeimr;
- uint32_t gtimr;
- uint32_t gtier;
- uint32_t gen6_pmimr;
- } regsave;
};
enum intel_pipe_crc_source {
@@ -1400,7 +1332,7 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
-typedef struct drm_i915_private {
+struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1424,10 +1356,13 @@ typedef struct drm_i915_private {
*/
uint32_t gpio_mmio_base;
+ /* MMIO base address for MIPI regs */
+ uint32_t mipi_mmio_base;
+
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
+ struct intel_engine_cs ring[I915_NUM_RINGS];
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -1469,6 +1404,7 @@ typedef struct drm_i915_private {
struct timer_list hotplug_reenable_timer;
struct i915_fbc fbc;
+ struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -1486,6 +1422,7 @@ typedef struct drm_i915_private {
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
+ unsigned int vlv_cdclk_freq;
/**
* wq - Driver workqueue for GEM.
@@ -1509,9 +1446,12 @@ typedef struct drm_i915_private {
struct mutex modeset_restore_lock;
struct list_head vm_list; /* Global list of all address spaces */
- struct i915_gtt gtt; /* VMA representing the global address space */
+ struct i915_gtt gtt; /* VM representing the global address space */
struct i915_gem_mm mm;
+#if defined(CONFIG_MMU_NOTIFIER)
+ DECLARE_HASHTABLE(mmu_notifiers, 7);
+#endif
/* Kernel Modesetting */
@@ -1580,6 +1520,7 @@ typedef struct drm_i915_private {
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
+ struct vlv_s0ix_state vlv_s0ix_state;
struct {
/*
@@ -1605,7 +1546,12 @@ typedef struct drm_i915_private {
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
-} drm_i915_private_t;
+
+ /*
+ * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
+ * will be rejected. Instead look for a better place.
+ */
+};
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
@@ -1642,6 +1588,8 @@ struct drm_i915_gem_object_ops {
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
};
struct drm_i915_gem_object {
@@ -1732,7 +1680,7 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
@@ -1755,8 +1703,20 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
drm_dma_handle_t *phys_handle;
-};
+ union {
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+ unsigned workers :4;
+#define I915_GEM_USERPTR_MAX_WORKERS 15
+
+ struct mm_struct *mm;
+ struct i915_mmu_object *mn;
+ struct work_struct *work;
+ } userptr;
+ };
+};
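+
+/*
+ * Sketch of the union above (semantics assumed from this series): the
+ * userptr state is only meaningful for objects created via the new userptr
+ * ioctl; workers is a 4-bit counter, so at most
+ * I915_GEM_USERPTR_MAX_WORKERS (15) get_pages workers can be outstanding
+ * per object.
+ */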
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
@@ -1771,7 +1731,7 @@ struct drm_i915_gem_object {
*/
struct drm_i915_gem_request {
/** On Which ring this request was generated */
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -1783,7 +1743,7 @@ struct drm_i915_gem_request {
u32 tail;
/** Context related to this request */
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
@@ -1810,8 +1770,8 @@ struct drm_i915_file_private {
} mm;
struct idr context_idr;
- struct i915_hw_context *private_default_ctx;
atomic_t rps_wait_boost;
+ struct intel_engine_cs *bsd_ring;
};
/*
@@ -1879,11 +1839,17 @@ struct drm_i915_cmd_descriptor {
* the expected value, the parser rejects it. Only valid if flags has
* the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
* are valid.
+ *
+ * If the check specifies a non-zero condition_mask then the parser
+ * only performs the check when the bits specified by condition_mask
+ * are non-zero.
*/
struct {
u32 offset;
u32 mask;
u32 expected;
+ u32 condition_offset;
+ u32 condition_mask;
} bits[MAX_CMD_DESC_BITMASKS];
};
@@ -1925,8 +1891,9 @@ struct drm_i915_cmd_table {
(dev)->pdev->device == 0x0106 || \
(dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
+#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
@@ -1962,17 +1929,21 @@ struct drm_i915_cmd_table {
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
-#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
-#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
+#define BSD2_RING (1<<VCS2)
+#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
+ to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
- && !IS_BROADWELL(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
+ (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
+ && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
@@ -2010,8 +1981,8 @@ struct drm_i915_cmd_table {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
-#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
-#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
+#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
+ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2068,6 +2039,7 @@ struct i915_params {
bool prefault_disable;
bool reset;
bool disable_display;
+ bool disable_vtd_wa;
};
extern struct i915_params i915 __read_mostly;
@@ -2096,6 +2068,7 @@ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
extern void intel_console_resume(struct work_struct *work);
@@ -2170,6 +2143,9 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_init_userptr(struct drm_device *dev);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
@@ -2227,9 +2203,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to);
+ struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring);
+ struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2249,31 +2225,14 @@ int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
- return true;
- } else
- return false;
-}
-
-static inline void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
- dev_priv->fence_regs[obj->fence_reg].pin_count--;
- }
-}
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring);
+i915_gem_find_active_request(struct intel_engine_cs *ring);
bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@ -2292,23 +2251,35 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
+static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gpu_error.stop_rings == 0 ||
+ dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
+}
+
+static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gpu_error.stop_rings == 0 ||
+ dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
+}
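+
+/*
+ * Usage sketch (semantics assumed from the defines in i915_gpu_error):
+ * stop_rings == 0 means no hang simulation is active, so banning and
+ * warning behave normally; a non-zero value stops the rings named in the
+ * low bits and suppresses the ban/warn paths unless the matching ALLOW
+ * bit is also set:
+ *
+ * dev_priv->gpu_error.stop_rings = (1 << RCS) | I915_STOP_RING_ALLOW_WARN;
+ */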
+
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
-int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
@@ -2319,7 +2290,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined);
+ struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
@@ -2416,22 +2387,22 @@ void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_ring_buffer *ring,
- struct i915_hw_context *to);
-struct i915_hw_context *
+int i915_switch_context(struct intel_engine_cs *ring,
+ struct intel_context *to);
+struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
-static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
+static inline void i915_gem_context_reference(struct intel_context *ctx)
{
kref_get(&ctx->ref);
}
-static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
+static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
kref_put(&ctx->ref, i915_gem_context_free);
}
-static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
return c->id == DEFAULT_CONTEXT_ID;
}
@@ -2441,6 +2412,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+/* i915_gem_render_state.c */
+int i915_gem_render_state_init(struct intel_engine_cs *ring);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
@@ -2453,23 +2426,12 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
-/* i915_gem_gtt.c */
-void i915_check_and_clear_faults(struct drm_device *dev);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
-int i915_gem_gtt_init(struct drm_device *dev);
+/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
-bool intel_enable_ppgtt(struct drm_device *dev, bool full);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
@@ -2537,9 +2499,11 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_cmd_parser.c */
-void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master);
@@ -2688,20 +2652,6 @@ void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
-void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (((reg) >= 0x2000 && (reg) < 0x4000) ||\
- ((reg) >= 0x5000 && (reg) < 0x8000) ||\
- ((reg) >= 0xB000 && (reg) < 0x12000) ||\
- ((reg) >= 0x2E000 && (reg) < 0x30000))
-
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
- (((reg) >= 0x12000 && (reg) < 0x14000) ||\
- ((reg) >= 0x22000 && (reg) < 0x24000) ||\
- ((reg) >= 0x30000 && (reg) < 0x40000))
-
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3326770c9ed2..f36126383d26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -43,6 +44,8 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
@@ -50,14 +53,15 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc);
-static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc);
+static int i915_gem_shrinker_oom(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -470,6 +474,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
+
+ i915_gem_object_retire(obj);
}
ret = i915_gem_object_get_pages(obj);
@@ -885,6 +891,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
+
+ i915_gem_object_retire(obj);
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
@@ -1088,7 +1096,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* equal.
*/
static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1107,7 +1115,7 @@ static void fake_irq(unsigned long data)
}
static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
@@ -1138,7 +1146,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with the remaining time filled in the timeout argument.
*/
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
struct timespec *timeout,
@@ -1245,7 +1253,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1270,9 +1278,10 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
- i915_gem_retire_requests_ring(ring);
+ if (!obj->active)
+ return 0;
/* Manually manage the write flush as we may not yet have
* retired the buffer.
@@ -1282,7 +1291,6 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
* we know we have passed the last write.
*/
obj->last_write_seqno = 0;
- obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
return 0;
}
@@ -1295,7 +1303,7 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
u32 seqno;
int ret;
@@ -1320,7 +1328,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
@@ -1536,7 +1544,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto unlock;
}
@@ -1803,12 +1811,16 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
+}
+
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct inode *inode;
-
i915_gem_object_free_mmap_offset(obj);
if (obj->base.filp == NULL)
@@ -1819,16 +1831,28 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
- inode = file_inode(obj->base.filp);
- shmem_truncate_range(inode, 0, (loff_t)-1);
-
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+/* Try to discard unwanted pages */
+static void
+i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
- return obj->madv == I915_MADV_DONTNEED;
+ struct address_space *mapping;
+
+ switch (obj->madv) {
+ case I915_MADV_DONTNEED:
+ i915_gem_object_truncate(obj);
+ /* fall through */
+ case __I915_MADV_PURGED:
+ return;
+ }
+
+ if (obj->base.filp == NULL)
+ return;
+
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
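+
+/*
+ * Design note (a sketch of the rationale): truncation is destructive and
+ * only legal for DONTNEED objects, while invalidate_mapping_pages() merely
+ * drops clean page cache pages, so i915_gem_object_invalidate() is safe to
+ * call unconditionally whenever the pages are released.
+ */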
static void
@@ -1893,8 +1917,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
ops->put_pages(obj);
obj->pages = NULL;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
+ i915_gem_object_invalidate(obj);
return 0;
}
@@ -1903,58 +1926,58 @@ static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
- struct list_head still_bound_list;
- struct drm_i915_gem_object *obj, *next;
+ struct list_head still_in_list;
+ struct drm_i915_gem_object *obj;
unsigned long count = 0;
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.unbound_list,
- global_list) {
- if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
- i915_gem_object_put_pages(obj) == 0) {
- count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
- }
-
/*
- * As we may completely rewrite the bound list whilst unbinding
+ * As we may completely rewrite the (un)bound list whilst unbinding
* (due to retiring requests) we have to strictly process only
* one element of the list at a time, and recheck the list
* on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object, as we may end up waiting for and/or retiring it.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count of 0.
*/
- INIT_LIST_HEAD(&still_bound_list);
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+ obj = list_first_entry(&dev_priv->mm.unbound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.unbound_list);
+
+ INIT_LIST_HEAD(&still_in_list);
while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
struct i915_vma *vma, *v;
obj = list_first_entry(&dev_priv->mm.bound_list,
typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_bound_list);
+ list_move_tail(&obj->global_list, &still_in_list);
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
- /*
- * Hold a reference whilst we unbind this object, as we may
- * end up waiting for and retiring requests. This might
- * release the final reference (held by the active list)
- * and result in the object being freed from under us.
- * in this object being freed.
- *
- * Note 1: Shrinking the bound list is special since only active
- * (and hence bound objects) can contain such limbo objects, so
- * we don't need special tricks for shrinking the unbound list.
- * The only other place where we have to be careful with active
- * objects suddenly disappearing due to retiring requests is the
- * eviction code.
- *
- * Note 2: Even though the bound list doesn't hold a reference
- * to the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
@@ -1966,7 +1989,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
drm_gem_object_unreference(&obj->base);
}
- list_splice(&still_bound_list, &dev_priv->mm.bound_list);
+ list_splice(&still_in_list, &dev_priv->mm.bound_list);
return count;
}
@@ -1980,17 +2003,8 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_object *obj, *next;
- long freed = 0;
-
i915_gem_evict_everything(dev_priv->dev);
-
- list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list) {
- if (i915_gem_object_put_pages(obj) == 0)
- freed += obj->base.size >> PAGE_SHIFT;
- }
- return freed;
+ return __i915_gem_shrink(dev_priv, LONG_MAX, false);
}
static int
@@ -2094,7 +2108,19 @@ err_pages:
page_cache_release(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
- return PTR_ERR(page);
+
+ /* shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC should there be insufficient memory, along with
+ * the usual ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (PTR_ERR(page) == -ENOSPC)
+ return -ENOMEM;
+ else
+ return PTR_ERR(page);
}
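+
+/*
+ * Resulting contract (sketch): callers of i915_gem_object_get_pages() only
+ * ever see -ENOMEM for true memory exhaustion and can keep treating
+ * -ENOSPC as "out of aperture space", e.g.:
+ *
+ * ret = i915_gem_object_get_pages(obj);
+ * if (ret == -ENOSPC)
+ * (evict something and retry the binding)
+ */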
/* Ensure that the associated pages are gathered from the backing storage
@@ -2131,7 +2157,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2169,7 +2195,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
@@ -2207,11 +2233,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring = obj->ring;
+
+ if (ring == NULL)
+ return;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_read_seqno))
+ i915_gem_object_move_to_inactive(obj);
+}
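+
+/*
+ * Illustrative note: this is a cheap single-object alternative to
+ * i915_gem_retire_requests_ring(); callers that have just waited for
+ * rendering (e.g. the set-domain paths) use it to move the object to the
+ * inactive list without walking the whole request list.
+ */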
+
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i, j;
/* Carefully retire all requests without writing to the rings */
@@ -2226,8 +2265,8 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
for_each_ring(ring, dev_priv, i) {
intel_ring_init_seqno(ring, seqno);
- for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
- ring->sync_seqno[j] = 0;
+ for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+ ring->semaphore.sync_seqno[j] = 0;
}
return 0;
@@ -2277,7 +2316,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
@@ -2382,7 +2421,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct i915_hw_context *ctx)
+ const struct intel_context *ctx)
{
unsigned long elapsed;
@@ -2395,8 +2434,9 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
- } else if (dev_priv->gpu_error.stop_rings == 0) {
- DRM_ERROR("gpu hanging too fast, banning!\n");
+ } else if (i915_stop_ring_allow_ban(dev_priv)) {
+ if (i915_stop_ring_allow_warn(dev_priv))
+ DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
}
@@ -2405,7 +2445,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
- struct i915_hw_context *ctx,
+ struct intel_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs;
@@ -2436,7 +2476,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
}
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring)
+i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
u32 completed_seqno;
@@ -2454,7 +2494,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring)
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
bool ring_hung;
@@ -2473,7 +2513,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
}
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2501,6 +2541,11 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
+
+ /* These may not have been flushed before the reset, do so now */
+ kfree(ring->preallocated_lazy_request);
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2527,7 +2572,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
/*
@@ -2541,8 +2586,6 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);
- i915_gem_cleanup_ringbuffer(dev);
-
i915_gem_context_reset(dev);
i915_gem_restore_fences(dev);
@@ -2551,8 +2594,8 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
-static void
-i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+void
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
uint32_t seqno;
@@ -2597,7 +2640,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
* of tail of the request to update the last known position
* of the GPU head.
*/
- ring->last_retired_head = request->tail;
+ ring->buffer->last_retired_head = request->tail;
i915_gem_free_request(request);
}
@@ -2615,7 +2658,7 @@ bool
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
bool idle = true;
int i;
@@ -2709,7 +2752,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine_cs *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;
@@ -2780,9 +2823,9 @@ out:
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
+ struct intel_engine_cs *to)
{
- struct intel_ring_buffer *from = obj->ring;
+ struct intel_engine_cs *from = obj->ring;
u32 seqno;
int ret, idx;
@@ -2795,7 +2838,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
seqno = obj->last_read_seqno;
- if (seqno <= from->sync_seqno[idx])
+ if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
ret = i915_gem_check_olr(obj->ring, seqno);
@@ -2803,13 +2846,13 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return ret;
trace_i915_gem_ring_sync_to(from, to, seqno);
- ret = to->sync_to(to, from, seqno);
+ ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
- from->sync_seqno[idx] = obj->last_read_seqno;
+ from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2865,12 +2908,14 @@ int i915_vma_unbind(struct i915_vma *vma)
* cause memory corruption through use-after-free.
*/
- i915_gem_object_finish_gtt(obj);
+ if (i915_is_ggtt(vma->vm)) {
+ i915_gem_object_finish_gtt(obj);
- /* release the fence reg _after_ flushing */
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
+ /* release the fence reg _after_ flushing */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
trace_i915_vma_unbind(vma);
@@ -2903,7 +2948,7 @@ int i915_vma_unbind(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
/* Flush everything onto the inactive list. */
@@ -3144,6 +3189,9 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
fence = &dev_priv->fence_regs[obj->fence_reg];
+ if (WARN_ON(fence->pin_count))
+ return -EBUSY;
+
i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);
@@ -3548,6 +3596,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
+ i915_gem_object_retire(obj);
i915_gem_object_flush_cpu_write_domain(obj, false);
/* Serialise direct access to this object with the barriers for
@@ -3646,6 +3695,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* in obj->write_domain and have been skipping the clflushes.
* Just set it to the CPU cache for now.
*/
+ i915_gem_object_retire(obj);
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
old_read_domains = obj->base.read_domains;
@@ -3743,6 +3793,15 @@ unlock:
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
+ struct i915_vma *vma;
+
+ if (list_empty(&obj->vma_list))
+ return false;
+
+ vma = i915_gem_obj_to_ggtt(obj);
+ if (!vma)
+ return false;
+
/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
@@ -3754,7 +3813,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
- return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
+ return vma->pin_count - !!obj->user_pin_count;
}
/*
@@ -3765,9 +3824,10 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine_cs *pipelined)
{
u32 old_read_domains, old_write_domain;
+ bool was_pin_display;
int ret;
if (pipelined != obj->ring) {
@@ -3779,6 +3839,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
+ was_pin_display = obj->pin_display;
obj->pin_display = true;
/* The display engine is not coherent with the LLC cache on gen6. As
@@ -3821,7 +3882,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return 0;
err_unpin_display:
- obj->pin_display = is_pin_display(obj);
+ WARN_ON(was_pin_display != is_pin_display(obj));
+ obj->pin_display = was_pin_display;
return ret;
}
@@ -3868,6 +3930,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
+ i915_gem_object_retire(obj);
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3917,7 +3980,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
@@ -3976,9 +4039,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
+ if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
+ return -ENODEV;
+
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
@@ -4032,6 +4099,32 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
obj->pin_mappable = false;
}
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+ WARN_ON(!ggtt_vma ||
+ dev_priv->fence_regs[obj->fence_reg].pin_count >
+ ggtt_vma->pin_count);
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
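A minimal user-space sketch of the fence pin-count accounting introduced above; the struct and helpers are illustrative stand-ins, not the driver's API:

#include <assert.h>
#include <stdio.h>

struct fence_reg { int pin_count; };       /* models dev_priv->fence_regs[i] */

static void fence_pin(struct fence_reg *f)
{
        f->pin_count++;                    /* caller already holds a GGTT vma pin */
}

static void fence_unpin(struct fence_reg *f)
{
        assert(f->pin_count > 0);          /* mirrors the WARN_ON(pin_count <= 0) */
        f->pin_count--;
}

int main(void)
{
        struct fence_reg f = { 0 };

        fence_pin(&f);
        fence_unpin(&f);
        printf("pin_count = %d\n", f.pin_count);   /* back to 0 */
        return 0;
}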
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -4292,6 +4385,30 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return obj;
}
+static bool discard_backing_storage(struct drm_i915_gem_object *obj)
+{
+ /* If we are the last user of the backing storage (be it shmemfs
+ * pages or stolen etc), we know that the pages are going to be
+ * immediately released. In this case, we can then skip copying
+ * back the contents from the GPU.
+ */
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return false;
+
+ if (obj->base.filp == NULL)
+ return true;
+
+ /* At first glance, this looks racy, but then again so would be
+ * userspace racing mmap against close. However, the first external
+ * reference to the filp can only be obtained through the
+ * i915_gem_mmap_ioctl() which safeguards us against the user
+ * acquiring such a reference whilst we are in the middle of
+ * freeing the object.
+ */
+ return atomic_long_read(&obj->base.filp->f_count) == 1;
+}
+
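The last-user test above reduces to a refcount check; a stand-alone sketch of just that idea, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct buf { long refcount; bool willneed; };

static bool discard_backing(const struct buf *b)
{
        if (!b->willneed)          /* equivalent of madv != I915_MADV_WILLNEED */
                return false;
        return b->refcount == 1;   /* sole owner: nobody can observe the contents */
}

int main(void)
{
        struct buf b = { .refcount = 1, .willneed = true };

        printf("%s\n", discard_backing(&b) ? "discard" : "keep");
        return 0;
}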
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4329,6 +4446,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
+ if (discard_backing_storage(obj))
+ obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
@@ -4338,6 +4457,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -4371,6 +4493,17 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
kfree(vma);
}
+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ intel_stop_ring_buffer(ring);
+}
+
int
i915_gem_suspend(struct drm_device *dev)
{
@@ -4392,7 +4525,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_evict_everything(dev);
i915_kernel_lost_context(dev);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_stop_ringbuffers(dev);
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
@@ -4413,7 +4546,7 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4512,13 +4645,20 @@ static int i915_gem_init_rings(struct drm_device *dev)
goto cleanup_blt_ring;
}
+ if (HAS_BSD2(dev)) {
+ ret = intel_init_bsd2_ring_buffer(dev);
+ if (ret)
+ goto cleanup_vebox_ring;
+ }
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
- goto cleanup_vebox_ring;
+ goto cleanup_bsd2_ring;
return 0;
+cleanup_bsd2_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
@@ -4576,15 +4716,11 @@ i915_gem_init_hw(struct drm_device *dev)
* the do_switch), but before enabling PPGTT. So don't move this.
*/
ret = i915_gem_context_enable(dev_priv);
- if (ret) {
+ if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
- goto err_out;
+ i915_gem_cleanup_ringbuffer(dev);
}
- return 0;
-
-err_out:
- i915_gem_cleanup_ringbuffer(dev);
return ret;
}
@@ -4597,11 +4733,13 @@ int i915_gem_init(struct drm_device *dev)
if (IS_VALLEYVIEW(dev)) {
/* VLVA0 (potential hack), BIOS isn't actually waking us */
- I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
- if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
+ if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_ALLOWWAKEACK), 10))
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
+ i915_gem_init_userptr(dev);
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
@@ -4611,25 +4749,28 @@ int i915_gem_init(struct drm_device *dev)
}
ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- WARN_ON(dev_priv->mm.aliasing_ppgtt);
- i915_gem_context_fini(dev);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
- return ret;
+ if (ret == -EIO) {
+ /* Allow ring initialisation to fail by marking the GPU as
+ * wedged. But we only want to do this where the GPU is angry,
+ * for any other failure, such as an allocation failure, bail.
+ */
+ DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ ret = 0;
}
+ mutex_unlock(&dev->struct_mutex);
/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->dri1.allow_batchbuffer = 1;
- return 0;
+ return ret;
}
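A sketch of the error policy this hunk adopts, assuming only -EIO denotes a GPU hang that should be absorbed by wedging:

#include <stdio.h>

#define EIO 5

static int filter_init_error(int ret, int *wedged)
{
        if (ret == -EIO) {
                *wedged = 1;       /* mark the GPU wedged, keep loading */
                return 0;
        }
        return ret;                /* allocation failures etc. still bail */
}

int main(void)
{
        int wedged = 0;
        int ret = filter_init_error(-EIO, &wedged);

        printf("ret=%d wedged=%d\n", ret, wedged);   /* ret=0 wedged=1 */
        return 0;
}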
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i)
@@ -4661,16 +4802,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
- mutex_unlock(&dev->struct_mutex);
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_ringbuffer;
+ mutex_unlock(&dev->struct_mutex);
return 0;
cleanup_ringbuffer:
- mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
@@ -4685,7 +4825,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
+ mutex_lock(&dev->struct_mutex);
drm_irq_uninstall(dev);
+ mutex_unlock(&dev->struct_mutex);
return i915_gem_suspend(dev);
}
@@ -4704,7 +4846,7 @@ i915_gem_lastclose(struct drm_device *dev)
}
static void
-init_ring_lists(struct intel_ring_buffer *ring)
+init_ring_lists(struct intel_engine_cs *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
@@ -4752,7 +4894,7 @@ i915_gem_load(struct drm_device *dev)
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (IS_GEN3(dev)) {
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
@@ -4779,10 +4921,13 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
- dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
- dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.inactive_shrinker);
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4857,27 +5002,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
+
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
+
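A rough user-space model of the trylock-or-recurse helper above; the kernel version peeks at the mutex owner, here the owner is tracked by hand (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static bool owned;

static bool shrinker_lock(bool *unlock)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                /* Contended: proceed only if this thread is the holder. */
                if (!owned || !pthread_equal(owner, pthread_self()))
                        return false;
                *unlock = false;   /* re-entered: leave the lock held */
                return true;
        }
        owner = pthread_self();
        owned = true;
        *unlock = true;            /* we took it, so we must release it */
        return true;
}

int main(void)
{
        bool unlock;

        if (shrinker_lock(&unlock))
                printf("locked, unlock=%d\n", unlock);       /* unlock=1 */
        if (shrinker_lock(&unlock))                          /* same thread again */
                printf("re-entered, unlock=%d\n", unlock);   /* unlock=0 */
        return 0;
}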
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
static unsigned long
-i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
- container_of(shrinker,
- struct drm_i915_private,
- mm.inactive_shrinker);
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- bool unlock = true;
unsigned long count;
+ bool unlock;
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
-
- if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
-
- unlock = false;
- }
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
@@ -4885,10 +5049,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (obj->active)
- continue;
-
- if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -4961,44 +5123,99 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
}
static unsigned long
-i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
- container_of(shrinker,
- struct drm_i915_private,
- mm.inactive_shrinker);
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
unsigned long freed;
- bool unlock = true;
+ bool unlock;
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return SHRINK_STOP;
-
- if (dev_priv->mm.shrinker_no_lock_stealing)
- return SHRINK_STOP;
-
- unlock = false;
- }
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
if (freed < sc->nr_to_scan)
freed += __i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed,
false);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink_all(dev_priv);
-
if (unlock)
mutex_unlock(&dev->struct_mutex);
return freed;
}
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ schedule_timeout_killable(1);
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed;
+ return NOTIFY_DONE;
+}
+
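A toy model of the notifier contract used here, where each callback adds the pages it freed to the counter passed through ptr; names are hypothetical:

#include <stdio.h>

typedef int (*notifier_fn)(unsigned long event, void *ptr);

static int gpu_oom_cb(unsigned long event, void *ptr)
{
        unsigned long freed = 128;          /* pretend we purged 128 pages */

        (void)event;
        *(unsigned long *)ptr += freed;     /* same convention as above */
        return 0;                           /* NOTIFY_DONE */
}

int main(void)
{
        notifier_fn chain[] = { gpu_oom_cb };
        unsigned long total = 0;
        unsigned int i;

        for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
                chain[i](0, &total);
        printf("reclaimed %lu pages\n", total);
        return 0;
}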
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
+ /* This WARN has probably outlived its usefulness (callers already
+ * WARN if they don't find the GGTT vma they expect). When removing,
+ * remember to remove the pre-check in is_pin_display() as well */
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d72db15afa02..a5ddf3bce9c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -178,7 +178,7 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct i915_hw_context *ctx = container_of(ctx_ref,
+ struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
@@ -199,7 +199,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
}
static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -218,12 +218,12 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
return ppgtt;
}
-static struct i915_hw_context *
+static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -240,7 +240,15 @@ __create_hw_context(struct drm_device *dev,
goto err_out;
}
- if (INTEL_INFO(dev)->gen >= 7) {
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ */
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
@@ -277,14 +285,14 @@ err_out:
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
-static struct i915_hw_context *
+static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv,
bool create_vm)
{
const bool is_global_default_ctx = file_priv == NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -356,8 +364,8 @@ void i915_gem_context_reset(struct drm_device *dev)
/* Prevent the hardware from restoring the last context (which hung) on
* the next switch */
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
- struct i915_hw_context *dctx = ring->default_context;
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
+ struct intel_context *dctx = ring->default_context;
/* Do a fake switch to the default context */
if (ring->last_context == dctx)
@@ -383,7 +391,7 @@ void i915_gem_context_reset(struct drm_device *dev)
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int i;
/* Init should only be called once per module load. Eventually the
@@ -418,7 +426,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+ struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
if (dctx->obj) {
@@ -441,10 +449,12 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
+
+ i915_gem_object_ggtt_unpin(dctx->obj);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
@@ -453,13 +463,12 @@ void i915_gem_context_fini(struct drm_device *dev)
ring->last_context = NULL;
}
- i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
}
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
/* This is the only place the aliasing PPGTT gets enabled, which means
@@ -486,11 +495,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct i915_hw_context *ctx = p;
-
- /* Ignore the default context because close will handle it */
- if (i915_gem_context_is_default(ctx))
- return 0;
+ struct intel_context *ctx = p;
i915_gem_context_unreference(ctx);
return 0;
@@ -499,17 +504,17 @@ static int context_idr_cleanup(int id, void *p, void *data)
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct intel_context *ctx;
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
- file_priv->private_default_ctx =
- i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+ ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(file_priv->private_default_ctx)) {
+ if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
- return PTR_ERR(file_priv->private_default_ctx);
+ return PTR_ERR(ctx);
}
return 0;
@@ -521,16 +526,14 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
-
- i915_gem_context_unreference(file_priv->private_default_ctx);
}
-struct i915_hw_context *
+struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
- ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+ ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
if (!ctx)
return ERR_PTR(-ENOENT);
@@ -538,8 +541,8 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
}
static inline int
-mi_set_context(struct intel_ring_buffer *ring,
- struct i915_hw_context *new_context,
+mi_set_context(struct intel_engine_cs *ring,
+ struct intel_context *new_context,
u32 hw_flags)
{
int ret;
@@ -549,7 +552,7 @@ mi_set_context(struct intel_ring_buffer *ring,
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+ if (IS_GEN6(ring->dev)) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
@@ -559,8 +562,8 @@ mi_set_context(struct intel_ring_buffer *ring,
if (ret)
return ret;
- /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
- if (IS_GEN7(ring->dev))
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+ if (INTEL_INFO(ring->dev)->gen >= 7)
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
intel_ring_emit(ring, MI_NOOP);
@@ -578,7 +581,7 @@ mi_set_context(struct intel_ring_buffer *ring,
*/
intel_ring_emit(ring, MI_NOOP);
- if (IS_GEN7(ring->dev))
+ if (INTEL_INFO(ring->dev)->gen >= 7)
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
else
intel_ring_emit(ring, MI_NOOP);
@@ -588,13 +591,14 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct intel_ring_buffer *ring,
- struct i915_hw_context *to)
+static int do_switch(struct intel_engine_cs *ring,
+ struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct i915_hw_context *from = ring->last_context;
+ struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
+ bool uninitialized = false;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -693,6 +697,7 @@ static int do_switch(struct intel_ring_buffer *ring,
i915_gem_context_unreference(from);
}
+ uninitialized = !to->is_initialized && from == NULL;
to->is_initialized = true;
done:
@@ -700,6 +705,12 @@ done:
ring->last_context = to;
to->last_ring = ring;
+ if (uninitialized) {
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+ }
+
return 0;
unpin_out:
@@ -718,8 +729,8 @@ unpin_out:
* it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*/
-int i915_switch_context(struct intel_ring_buffer *ring,
- struct i915_hw_context *to)
+int i915_switch_context(struct intel_engine_cs *ring,
+ struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -748,7 +759,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
if (!hw_context_enabled(dev))
@@ -774,7 +785,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
if (args->ctx_id == DEFAULT_CONTEXT_ID)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 9bb533e0d762..580aa42443ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -161,12 +161,8 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return;
+ mutex_lock(&dev->struct_mutex);
if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
@@ -233,6 +229,14 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ if (obj->ops->dmabuf_export) {
+ int ret = obj->ops->dmabuf_export(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20fef6c50267..3a30133f93e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -265,10 +265,12 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
+ uint64_t delta = reloc->delta + target_offset;
char *vaddr;
int ret;
@@ -278,7 +280,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT));
- *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
if (INTEL_INFO(dev)->gen >= 8) {
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
@@ -289,7 +291,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
}
- *(uint32_t *)(vaddr + page_offset) = 0;
+ *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
}
kunmap_atomic(vaddr);
@@ -299,10 +301,12 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint64_t delta = reloc->delta + target_offset;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
int ret;
@@ -321,7 +325,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
- iowrite32(reloc->delta, reloc_entry);
+ iowrite32(lower_32_bits(delta), reloc_entry);
if (INTEL_INFO(dev)->gen >= 8) {
reloc_entry += 1;
@@ -334,7 +338,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
reloc_entry = reloc_page;
}
- iowrite32(0, reloc_entry);
+ iowrite32(upper_32_bits(delta), reloc_entry);
}
io_mapping_unmap_atomic(reloc_page);
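The gen8 paths above now write a 64-bit delta as two dwords; a quick stand-alone check of the split, with lower_32_bits/upper_32_bits open-coded:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t delta = 0x123456789abcdef0ull;   /* reloc->delta + target_offset */

        printf("low = 0x%08x, high = 0x%08x\n",
               lower_32_bits(delta), upper_32_bits(delta));
        return 0;
}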
@@ -351,7 +355,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
- uint32_t target_offset;
+ uint64_t target_offset;
int ret;
/* we've already hold a reference to all valid objects */
@@ -429,11 +433,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (obj->active && in_atomic())
return -EFAULT;
- reloc->delta += target_offset;
if (use_cpu_reloc(obj))
- ret = relocate_entry_cpu(obj, reloc);
+ ret = relocate_entry_cpu(obj, reloc, target_offset);
else
- ret = relocate_entry_gtt(obj, reloc);
+ ret = relocate_entry_gtt(obj, reloc, target_offset);
if (ret)
return ret;
@@ -541,7 +544,7 @@ need_reloc_mappable(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -628,7 +631,7 @@ eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
}
static int
-i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
bool *need_relocs)
{
@@ -642,6 +645,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (list_empty(vmas))
return 0;
+ i915_gem_retire_requests_ring(ring);
+
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
INIT_LIST_HEAD(&ordered_vmas);
@@ -727,7 +732,7 @@ static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
@@ -843,7 +848,7 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct list_head *vmas)
{
struct i915_vma *vma;
@@ -926,11 +931,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
-static struct i915_hw_context *
+static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- struct intel_ring_buffer *ring, const u32 ctx_id)
+ struct intel_engine_cs *ring, const u32 ctx_id)
{
- struct i915_hw_context *ctx = NULL;
+ struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
@@ -951,7 +956,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct i915_vma *vma;
@@ -974,6 +979,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (i915_gem_obj_ggtt_bound(obj) &&
i915_gem_obj_to_ggtt(obj)->pin_count)
intel_mark_fb_busy(obj, ring);
+
+ /* update for the implicit flush after a batch */
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
@@ -983,7 +991,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
@@ -995,13 +1003,15 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
- return 0;
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("sol reset is gen7/rcs only\n");
+ return -EINVAL;
+ }
ret = intel_ring_begin(ring, 4 * 3);
if (ret)
@@ -1018,6 +1028,37 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+/**
+ * Find one BSD ring to dispatch the corresponding BSD command.
+ * The Ring ID is returned.
+ */
+static int gen8_dispatch_bsd_ring(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ /* Check whether the file_priv is using one ring */
+ if (file_priv->bsd_ring)
+ return file_priv->bsd_ring->id;
+ else {
+ /* If no, use the ping-pong mechanism to select one ring */
+ int ring_id;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
+ ring_id = VCS;
+ dev_priv->mm.bsd_ring_dispatch_index = 1;
+ } else {
+ ring_id = VCS2;
+ dev_priv->mm.bsd_ring_dispatch_index = 0;
+ }
+ file_priv->bsd_ring = &dev_priv->ring[ring_id];
+ mutex_unlock(&dev->struct_mutex);
+ return ring_id;
+ }
+}
+
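The ping-pong selection is easy to check stand-alone; a minimal sketch with illustrative ring ids (the real values live in the driver's ring enum):

#include <stdio.h>

enum { VCS = 1, VCS2 = 4 };          /* illustrative values */

static int dispatch_index;           /* device-wide ping-pong state */

struct file_ctx { int bsd_ring; };   /* 0 == not chosen yet */

static int pick_bsd_ring(struct file_ctx *f)
{
        if (f->bsd_ring)             /* sticky once chosen */
                return f->bsd_ring;
        f->bsd_ring = dispatch_index ? VCS2 : VCS;
        dispatch_index ^= 1;         /* next new file gets the other ring */
        return f->bsd_ring;
}

int main(void)
{
        struct file_ctx a = { 0 }, b = { 0 };
        int r1 = pick_bsd_ring(&a);
        int r2 = pick_bsd_ring(&a);  /* same ring as r1 */
        int r3 = pick_bsd_ring(&b);  /* the other ring */

        printf("a=%d a=%d b=%d\n", r1, r2, r3);
        return 0;
}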
static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
@@ -1047,11 +1088,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
- struct intel_ring_buffer *ring;
- struct i915_hw_context *ctx;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u32 exec_start = args->batch_start_offset, exec_len;
+ u64 exec_start = args->batch_start_offset, exec_len;
u32 mask, flags;
int ret, mode, i;
bool need_relocs;
@@ -1073,7 +1114,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
flags |= I915_DISPATCH_PINNED;
- if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
+ if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
@@ -1081,7 +1122,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
- else
+ else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
+ if (HAS_BSD2(dev)) {
+ int ring_id;
+ ring_id = gen8_dispatch_bsd_ring(dev, file);
+ ring = &dev_priv->ring[ring_id];
+ } else
+ ring = &dev_priv->ring[VCS];
+ } else
ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
if (!intel_ring_initialized(ring)) {
@@ -1096,14 +1144,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (ring == &dev_priv->ring[RCS] &&
- mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev)->gen < 4)
+ if (mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ return -EINVAL;
+ }
+
+ if (mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("no rel constants on pre-gen4\n");
return -EINVAL;
+ }
if (INTEL_INFO(dev)->gen > 5 &&
- mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
return -EINVAL;
+ }
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
@@ -1151,6 +1207,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = -EFAULT;
goto pre_mutex_err;
}
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
}
intel_runtime_pm_get(dev_priv);
@@ -1170,7 +1236,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
goto pre_mutex_err;
- }
+ }
i915_gem_context_reference(ctx);
@@ -1180,6 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
eb = eb_create(args);
if (eb == NULL) {
+ i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
@@ -1430,6 +1497,11 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->rsvd2 != 0) {
+ DRM_DEBUG("dirty rvsd2 field\n");
+ return -EINVAL;
+ }
+
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5deb22864c52..8b3cde703364 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,7 +30,8 @@
#include "i915_trace.h"
#include "intel_drv.h"
-static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
@@ -65,59 +66,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
-#define GEN6_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
-
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
-
-#define GEN6_PDE_VALID (1 << 0)
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-
-/* Cacheability Control is a 4-bit value. The low three bits are stored in *
- * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
- */
-#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
- (((bits) & 0x8) << (11 - 3)))
-#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
-#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
-#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
-#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
-#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
-#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
-
-#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
-
-/* GEN8 legacy style addressis defined as a 3 level page table:
- * 31:30 | 29:21 | 20:12 | 11:0
- * PDPE | PDE | PTE | offset
- * The difference as compared to normal x86 3 level page table is the PDPEs are
- * programmed via register.
- */
-#define GEN8_PDPE_SHIFT 30
-#define GEN8_PDPE_MASK 0x3
-#define GEN8_PDE_SHIFT 21
-#define GEN8_PDE_MASK 0x1ff
-#define GEN8_PTE_SHIFT 12
-#define GEN8_PTE_MASK 0x1ff
-
-#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
-#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
@@ -131,10 +79,19 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
{
gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
- if (level != I915_CACHE_NONE)
- pte |= PPAT_CACHED_INDEX;
- else
+
+ switch (level) {
+ case I915_CACHE_NONE:
pte |= PPAT_UNCACHED_INDEX;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC_INDEX;
+ break;
+ default:
+ pte |= PPAT_CACHED_INDEX;
+ break;
+ }
+
return pte;
}
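A stand-alone sketch of the new cache-level dispatch; the x86 PPAT index bits are open-coded here and assumed from the header introduced later in this diff:

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT  (1ull << 0)
#define _PAGE_RW       (1ull << 1)
#define _PAGE_PWT      (1ull << 3)
#define _PAGE_PCD      (1ull << 4)
#define _PAGE_PAT      (1ull << 7)

#define PPAT_UNCACHED_INDEX     (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_INDEX       _PAGE_PAT       /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD       /* WT eLLC */

enum cache_level { CACHE_NONE, CACHE_WT, CACHE_LLC };

static uint64_t gen8_pte(uint64_t addr, enum cache_level level, int valid)
{
        uint64_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;

        pte |= addr;
        switch (level) {
        case CACHE_NONE: pte |= PPAT_UNCACHED_INDEX;     break;
        case CACHE_WT:   pte |= PPAT_DISPLAY_ELLC_INDEX; break;
        default:         pte |= PPAT_CACHED_INDEX;       break;
        }
        return pte;
}

int main(void)
{
        printf("0x%llx\n",
               (unsigned long long)gen8_pte(0x1000, CACHE_WT, 1));
        return 0;
}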
@@ -197,9 +154,6 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-#define BYT_PTE_WRITEABLE (1 << 1)
-#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
@@ -253,7 +207,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val, bool synchronous)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -283,7 +237,7 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
}
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
int i, ret;
@@ -332,6 +286,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
num_entries--;
}
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pte = 0;
@@ -368,6 +324,8 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++pte == GEN8_PTES_PER_PAGE) {
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
if (++pde == GEN8_PDES_PER_PAGE) {
@@ -377,8 +335,11 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pte = 0;
}
}
- if (pt_vaddr)
+ if (pt_vaddr) {
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
+ }
}
static void gen8_free_page_tables(struct page **pt_pages)
@@ -641,6 +602,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
+ if (!HAS_LLC(ppgtt->base.dev))
+ drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
kunmap_atomic(pd_vaddr);
}
@@ -753,7 +716,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -797,7 +760,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -848,7 +811,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -869,7 +832,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int j, ret;
for_each_ring(ring, dev_priv, j) {
@@ -899,7 +862,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
uint32_t ecochk, ecobits;
int i;
@@ -938,7 +901,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
uint32_t ecochk, gab_ctl, ecobits;
int i;
@@ -1067,8 +1030,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
-#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
-#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool retried = false;
@@ -1084,8 +1045,7 @@ alloc:
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
0, dev_priv->gtt.base.total,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
@@ -1311,7 +1271,7 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
if (INTEL_INFO(dev)->gen < 6)
@@ -1386,7 +1346,11 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 8) {
- gen8_setup_private_ppat(dev_priv);
+ if (IS_CHERRYVIEW(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
+
return;
}
@@ -1438,7 +1402,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
@@ -1811,9 +1775,27 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
return bdw_gmch_ctl << 20;
}
+static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
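Following the probe math: the GGMS field sizes the GTT itself, and the usable address space is the entry count times the page size. A stand-alone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int gmch_ctrl = 3;                          /* sample GGMS field */
        uint64_t gtt_size = 1ull << (20 + gmch_ctrl);        /* 8 MB of PTEs */
        uint64_t total = gtt_size / sizeof(uint64_t) * 4096; /* 8-byte PTEs */

        printf("gtt=%llu MB, address space=%llu GB\n",
               (unsigned long long)(gtt_size >> 20),         /* 8 */
               (unsigned long long)(total >> 30));           /* 4 */
        return 0;
}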
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -1828,6 +1810,24 @@ static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
return bdw_gmch_ctl << 25; /* 32 MB units */
}
+static size_t chv_get_stolen_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+ /*
+ * 0x0 to 0x10: 32MB increments starting at 0MB
+ * 0x11 to 0x16: 4MB increments starting at 8MB
+ * 0x17 to 0x1d: 4MB increments starting at 36MB
+ */
+ if (gmch_ctrl < 0x11)
+ return gmch_ctrl << 25;
+ else if (gmch_ctrl < 0x17)
+ return (gmch_ctrl - 0x11 + 2) << 22;
+ else
+ return (gmch_ctrl - 0x17 + 9) << 22;
+}
+
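A quick user-space sanity check of the stolen-size decode above, run over a few sample GMCH control values:

#include <stdio.h>

static unsigned long chv_stolen_bytes(unsigned int gmch_ctrl)
{
        if (gmch_ctrl < 0x11)
                return (unsigned long)gmch_ctrl << 25;              /* 32MB steps */
        else if (gmch_ctrl < 0x17)
                return (unsigned long)(gmch_ctrl - 0x11 + 2) << 22; /* 8MB base */
        else
                return (unsigned long)(gmch_ctrl - 0x17 + 9) << 22; /* 36MB base */
}

int main(void)
{
        unsigned int samples[] = { 0x1, 0x10, 0x11, 0x17 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("0x%02x -> %lu MB\n", samples[i],
                       chv_stolen_bytes(samples[i]) >> 20);
        return 0;   /* expect 32, 512, 8 and 36 MB */
}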
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
@@ -1858,19 +1858,8 @@ static int ggtt_probe_common(struct drm_device *dev,
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
-{
-#define GEN8_PPAT_UC (0<<0)
-#define GEN8_PPAT_WC (1<<0)
-#define GEN8_PPAT_WT (2<<0)
-#define GEN8_PPAT_WB (3<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
-/* FIXME(BDW): Bspec is completely confused about cache control bits. */
-#define GEN8_PPAT_LLC (1<<2)
-#define GEN8_PPAT_LLCELLC (2<<2)
-#define GEN8_PPAT_LLCeLLC (3<<2)
-#define GEN8_PPAT_AGE(x) (x<<4)
-#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
uint64_t pat;
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
@@ -1888,6 +1877,33 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+ uint64_t pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * Note that the hardware enforces snooping for all page
+ * table accesses. The snoop bit is actually ignored for
+ * PDEs.
+ */
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ I915_WRITE(GEN8_PRIVATE_PAT, pat);
+ I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
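The PAT write packs eight 8-bit entries into one 64-bit value split across two 32-bit registers; a stand-alone illustration of the GEN8_PPAT() packing:

#include <stdint.h>
#include <stdio.h>

#define CHV_PPAT_SNOOP  (1 << 6)
#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))

int main(void)
{
        /* Entries 1-3 are zero, as in the chv table above. */
        uint64_t pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
                       GEN8_PPAT(4, CHV_PPAT_SNOOP) |
                       GEN8_PPAT(5, CHV_PPAT_SNOOP) |
                       GEN8_PPAT(6, CHV_PPAT_SNOOP) |
                       GEN8_PPAT(7, CHV_PPAT_SNOOP);

        /* Low dword goes to GEN8_PRIVATE_PAT, high dword to the +4 register. */
        printf("lo = 0x%08x, hi = 0x%08x\n",
               (uint32_t)pat, (uint32_t)(pat >> 32));
        return 0;
}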
static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
@@ -1908,12 +1924,20 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+ if (IS_CHERRYVIEW(dev)) {
+ *stolen = chv_get_stolen_size(snb_gmch_ctl);
+ gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+ } else {
+ *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ }
- gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
- gen8_setup_private_ppat(dev_priv);
+ if (IS_CHERRYVIEW(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
ret = ggtt_probe_common(dev, gtt_size);
@@ -1968,7 +1992,10 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
- drm_mm_takedown(&vm->mm);
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
@@ -2001,6 +2028,10 @@ static int i915_gmch_probe(struct drm_device *dev,
static void i915_gmch_remove(struct i915_address_space *vm)
{
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
intel_gmch_remove();
}
@@ -2043,6 +2074,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped)
+ DRM_INFO("VT-d active for gfx access\n");
+#endif
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
new file mode 100644
index 000000000000..1b96a06be3cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ */
+
+#ifndef __I915_GEM_GTT_H__
+#define __I915_GEM_GTT_H__
+
+typedef uint32_t gen6_gtt_pte_t;
+typedef uint64_t gen8_gtt_pte_t;
+typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_VALID (1 << 0)
+
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_VALID (1 << 0)
+
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
+#define BYT_PTE_WRITEABLE (1 << 1)
+
+/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED (0)
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
+
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference as compared to normal x86 3 level page table is the PDPEs are
+ * programmed via register.
+ */
+#define GEN8_PDPE_SHIFT 30
+#define GEN8_PDPE_MASK 0x3
+#define GEN8_PDE_SHIFT 21
+#define GEN8_PDE_MASK 0x1ff
+#define GEN8_PTE_SHIFT 12
+#define GEN8_PTE_MASK 0x1ff
+#define GEN8_LEGACY_PDPS 4
+#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
+#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+
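A small check of how the shifts and masks above decompose a GPU virtual address:

#include <stdint.h>
#include <stdio.h>

#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK  0x3
#define GEN8_PDE_SHIFT  21
#define GEN8_PDE_MASK   0x1ff
#define GEN8_PTE_SHIFT  12
#define GEN8_PTE_MASK   0x1ff

int main(void)
{
        uint32_t va = 0xdeadb000;

        printf("pdpe=%u pde=%u pte=%u offset=0x%03x\n",
               (va >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK,
               (va >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK,
               (va >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK,
               va & 0xfff);
        return 0;
}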
+#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
+#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP (1<<6)
+#define GEN8_PPAT_AGE(x) (x<<4)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+
+enum i915_cache_level;
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head mm_list;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+ /** Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page. */
+ void (*unbind_vma)(struct i915_vma *vma);
+ /* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+ void (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+};
+
+struct i915_address_space {
+ struct drm_mm mm;
+ struct drm_device *dev;
+ struct list_head global_link;
+ unsigned long start; /* Start offset always 0 for dri2 */
+ size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+
+ struct {
+ dma_addr_t addr;
+ struct page *page;
+ } scratch;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /* FIXME: Need a more generic return type */
+ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid); /* Create a valid PTE */
+ void (*clear_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct sg_table *st,
+ uint64_t start,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_address_space *vm);
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations, GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+ struct i915_address_space base;
+ size_t stolen_size; /* Total size of stolen memory */
+
+ unsigned long mappable_end; /* End offset that we can CPU map */
+ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
+ phys_addr_t mappable_base; /* PA of our GMADR */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+
+ bool do_idle_maps;
+
+ int mtrr;
+
+ /* global gtt ops */
+ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ unsigned long *mappable_end);
+};
+
+struct i915_hw_ppgtt {
+ struct i915_address_space base;
+ struct kref ref;
+ struct drm_mm_node node;
+ unsigned num_pd_entries;
+ unsigned num_pd_pages; /* gen8+ */
+ union {
+ struct page **pt_pages;
+ struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
+ };
+ struct page *pd_pages;
+ union {
+ uint32_t pd_offset;
+ dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
+ };
+ union {
+ dma_addr_t *pt_dma_addr;
+ dma_addr_t *gen8_pt_dma_addr[4];
+ };
+
+ struct intel_context *ctx;
+
+ int (*enable)(struct i915_hw_ppgtt *ppgtt);
+ int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring,
+ bool synchronous);
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
+
+bool intel_enable_ppgtt(struct drm_device *dev, bool full);
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
new file mode 100644
index 000000000000..3521f998a178
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_renderstate.h"
+
+struct i915_render_state {
+ struct drm_i915_gem_object *obj;
+ unsigned long ggtt_offset;
+ void *batch;
+ u32 size;
+ u32 len;
+};
+
+static struct i915_render_state *render_state_alloc(struct drm_device *dev)
+{
+ struct i915_render_state *so;
+ struct page *page;
+ int ret;
+
+ so = kzalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return ERR_PTR(-ENOMEM);
+
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ so->size = 4096;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ BUG_ON(so->obj->pages->nents != 1);
+ page = sg_page(so->obj->pages->sgl);
+
+ so->batch = kmap(page);
+ if (!so->batch) {
+ ret = -ENOMEM;
+ goto unpin;
+ }
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+
+ return so;
+unpin:
+ i915_gem_object_ggtt_unpin(so->obj);
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+free:
+ kfree(so);
+ return ERR_PTR(ret);
+}
+
+static void render_state_free(struct i915_render_state *so)
+{
+ kunmap(so->batch);
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+ kfree(so);
+}
+
+static const struct intel_renderstate_rodata *
+render_state_get_rodata(struct drm_device *dev, const int gen)
+{
+ switch (gen) {
+ case 6:
+ return &gen6_null_state;
+ case 7:
+ return &gen7_null_state;
+ case 8:
+ return &gen8_null_state;
+ }
+
+ return NULL;
+}
+
+static int render_state_setup(const int gen,
+ const struct intel_renderstate_rodata *rodata,
+ struct i915_render_state *so)
+{
+ const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
+ u32 reloc_index = 0;
+ u32 * const d = so->batch;
+ unsigned int i = 0;
+ int ret;
+
+ if (!rodata || rodata->batch_items * 4 > so->size)
+ return -EINVAL;
+
+ ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+ if (ret)
+ return ret;
+
+ while (i < rodata->batch_items) {
+ u32 s = rodata->batch[i];
+
+ if (reloc_index < rodata->reloc_items &&
+ i * 4 == rodata->reloc[reloc_index]) {
+
+ s += goffset & 0xffffffff;
+
+ /* We keep batch offsets to a max of 32 bits */
+ if (gen >= 8) {
+ if (i + 1 >= rodata->batch_items ||
+ rodata->batch[i + 1] != 0)
+ return -EINVAL;
+
+ d[i] = s;
+ i++;
+ s = (goffset & 0xffffffff00000000ull) >> 32;
+ }
+
+ reloc_index++;
+ }
+
+ d[i] = s;
+ i++;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ if (ret)
+ return ret;
+
+ if (rodata->reloc_items != reloc_index) {
+ DRM_ERROR("not all relocs resolved, %d out of %d\n",
+ reloc_index, rodata->reloc_items);
+ return -EINVAL;
+ }
+
+ so->len = rodata->batch_items * 4;
+
+ return 0;
+}
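
The gen8+ branch above is easy to miss: a relocation there consumes two consecutive batch dwords, and the dword following the relocation must be zero in the rodata so the high 32 bits of the GGTT offset can be written into it. A hedged stand-alone illustration of that split, with all values made up:

/* Hedged sketch of the gen8+ relocation handling in
 * render_state_setup() above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t goffset = 0x100003000ull;       /* example GGTT offset     */
	uint32_t batch[2] = { 0x00000040, 0 };   /* dword + zeroed high slot */

	batch[0] += goffset & 0xffffffff;        /* low 32 bits patched in  */
	batch[1]  = goffset >> 32;               /* high 32 bits follow     */

	printf("lo=0x%08x hi=0x%08x\n", batch[0], batch[1]);
	return 0;
}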
+
+int i915_gem_render_state_init(struct intel_engine_cs *ring)
+{
+ const int gen = INTEL_INFO(ring->dev)->gen;
+ struct i915_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
+ int ret;
+
+ if (WARN_ON(ring->id != RCS))
+ return -ENOENT;
+
+ rodata = render_state_get_rodata(ring->dev, gen);
+ if (rodata == NULL)
+ return 0;
+
+ so = render_state_alloc(ring->dev);
+ if (IS_ERR(so))
+ return PTR_ERR(so);
+
+ ret = render_state_setup(gen, rodata, so);
+ if (ret)
+ goto out;
+
+ ret = ring->dispatch_execbuffer(ring,
+ i915_gem_obj_ggtt_offset(so->obj),
+ so->len,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+
+ ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ /* __i915_add_request moves object to inactive if it fails */
+out:
+ render_state_free(so);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62ef55ba061c..7465ab0fd396 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (base == 0)
return 0;
+ /* make sure we don't clobber the GTT if it's within stolen memory */
+ if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ struct {
+ u32 start, end;
+ } stolen[2] = {
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ };
+ u64 gtt_start, gtt_end;
+
+ gtt_start = I915_READ(PGTBL_CTL);
+ if (IS_GEN4(dev))
+ gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ else
+ gtt_start &= PGTBL_ADDRESS_LO_MASK;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+ if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+ stolen[0].end = gtt_start;
+ if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+ stolen[1].start = gtt_end;
+
+ /* pick the larger of the two chunks */
+ if (stolen[0].end - stolen[0].start >
+ stolen[1].end - stolen[1].start) {
+ base = stolen[0].start;
+ dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ } else {
+ base = stolen[1].start;
+ dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ }
+
+ if (stolen[0].start != stolen[1].start ||
+ stolen[0].end != stolen[1].end) {
+ DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+ (unsigned long long) gtt_start,
+ (unsigned long long) gtt_end - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+ base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ }
+ }
+
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
new file mode 100644
index 000000000000..21ea92886a56
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -0,0 +1,711 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mempolicy.h>
+#include <linux/swap.h>
+
+#if defined(CONFIG_MMU_NOTIFIER)
+#include <linux/interval_tree.h>
+
+struct i915_mmu_notifier {
+ spinlock_t lock;
+ struct hlist_node node;
+ struct mmu_notifier mn;
+ struct rb_root objects;
+ struct drm_device *dev;
+ struct mm_struct *mm;
+ struct work_struct work;
+ unsigned long count;
+ unsigned long serial;
+};
+
+struct i915_mmu_object {
+ struct i915_mmu_notifier *mmu;
+ struct interval_tree_node it;
+ struct drm_i915_gem_object *obj;
+};
+
+static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
+ struct interval_tree_node *it = NULL;
+ unsigned long serial = 0;
+
+ end--; /* interval ranges are inclusive, but the invalidate range is exclusive */
+ while (start < end) {
+ struct drm_i915_gem_object *obj;
+
+ obj = NULL;
+ spin_lock(&mn->lock);
+ if (serial == mn->serial)
+ it = interval_tree_iter_next(it, start, end);
+ else
+ it = interval_tree_iter_first(&mn->objects, start, end);
+ if (it != NULL) {
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ drm_gem_object_reference(&obj->base);
+ serial = mn->serial;
+ }
+ spin_unlock(&mn->lock);
+ if (obj == NULL)
+ return;
+
+ mutex_lock(&mn->dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(mn->dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ start = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&mn->dev->struct_mutex);
+ }
+}
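
The end-- at the top of the handler converts the notifier's half-open [start, end) range into the inclusive [start, last] convention used by the interval tree. A trivial hedged check:

/* Hedged sketch of the range conversion done on entry above. */
#include <assert.h>

int main(void)
{
	/* invalidating one 4KiB page at 0x1000 */
	unsigned long start = 0x1000, end = 0x2000;

	end--; /* same conversion as in the function above */
	assert(start == 0x1000 && end == 0x1fff);
	return 0;
}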
+
+static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
+ .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+};
+
+static struct i915_mmu_notifier *
+__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_mmu_notifier *mmu;
+
+ /* Protected by dev->struct_mutex */
+ hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
+ if (mmu->mm == mm)
+ return mmu;
+
+ return NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_mmu_notifier *mmu;
+ int ret;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ mmu = __i915_mmu_notifier_lookup(dev, mm);
+ if (mmu)
+ return mmu;
+
+ mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
+ if (mmu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&mmu->lock);
+ mmu->dev = dev;
+ mmu->mn.ops = &i915_gem_userptr_notifier;
+ mmu->mm = mm;
+ mmu->objects = RB_ROOT;
+ mmu->count = 0;
+ mmu->serial = 0;
+
+ /* Protected by mmap_sem (write-lock) */
+ ret = __mmu_notifier_register(&mmu->mn, mm);
+ if (ret) {
+ kfree(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Protected by dev->struct_mutex */
+ hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
+ return mmu;
+}
+
+static void
+__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+{
+ struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
+ mmu_notifier_unregister(&mmu->mn, mmu->mm);
+ kfree(mmu);
+}
+
+static void
+__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
+{
+ lockdep_assert_held(&mmu->dev->struct_mutex);
+
+ /* Protected by dev->struct_mutex */
+ hash_del(&mmu->node);
+
+ /* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
+ * We enter the function holding struct_mutex, therefore we need
+ * to drop our mutex prior to calling mmu_notifier_unregister in
+ * order to prevent lock inversion (and system-wide deadlock)
+ * between the mmap_sem and struct_mutex. Hence we defer the
+ * unregistration to a workqueue where we hold no locks.
+ */
+ INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
+ schedule_work(&mmu->work);
+}
+
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
+{
+ if (++mmu->serial == 0)
+ mmu->serial = 1;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
+ struct i915_mmu_object *mn)
+{
+ lockdep_assert_held(&mmu->dev->struct_mutex);
+
+ spin_lock(&mmu->lock);
+ interval_tree_remove(&mn->it, &mmu->objects);
+ __i915_mmu_notifier_update_serial(mmu);
+ spin_unlock(&mmu->lock);
+
+ /* Protected against _add() by dev->struct_mutex */
+ if (--mmu->count == 0)
+ __i915_mmu_notifier_destroy(mmu);
+}
+
+static int
+i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
+ struct i915_mmu_object *mn)
+{
+ struct interval_tree_node *it;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(mmu->dev);
+ if (ret)
+ return ret;
+
+ /* Make sure we drop the final active reference (and thereby
+ * remove the objects from the interval tree) before we do
+ * the check for overlapping objects.
+ */
+ i915_gem_retire_requests(mmu->dev);
+
+ /* Disallow overlapping userptr objects */
+ spin_lock(&mmu->lock);
+ it = interval_tree_iter_first(&mmu->objects,
+ mn->it.start, mn->it.last);
+ if (it) {
+ struct drm_i915_gem_object *obj;
+
+ /* We only need to check the first object in the range as it
+ * either has cancelled gup work queued, in which case we need to
+ * return to the user to give the gup-workers time to flush their
+ * object references (upon which the object will be removed from
+ * the interval-tree), or the range is still in use by another
+ * client and the overlap is invalid.
+ */
+
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
+ } else {
+ interval_tree_insert(&mn->it, &mmu->objects);
+ __i915_mmu_notifier_update_serial(mmu);
+ ret = 0;
+ }
+ spin_unlock(&mmu->lock);
+ mutex_unlock(&mmu->dev->struct_mutex);
+
+ return ret;
+}
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmu_object *mn;
+
+ mn = obj->userptr.mn;
+ if (mn == NULL)
+ return;
+
+ i915_mmu_notifier_del(mn->mmu, mn);
+ obj->userptr.mn = NULL;
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+ unsigned flags)
+{
+ struct i915_mmu_notifier *mmu;
+ struct i915_mmu_object *mn;
+ int ret;
+
+ if (flags & I915_USERPTR_UNSYNCHRONIZED)
+ return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+
+ down_write(&obj->userptr.mm->mmap_sem);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret == 0) {
+ mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
+ if (!IS_ERR(mmu))
+ mmu->count++; /* preemptive add to act as a refcount */
+ else
+ ret = PTR_ERR(mmu);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ }
+ up_write(&obj->userptr.mm->mmap_sem);
+ if (ret)
+ return ret;
+
+ mn = kzalloc(sizeof(*mn), GFP_KERNEL);
+ if (mn == NULL) {
+ ret = -ENOMEM;
+ goto destroy_mmu;
+ }
+
+ mn->mmu = mmu;
+ mn->it.start = obj->userptr.ptr;
+ mn->it.last = mn->it.start + obj->base.size - 1;
+ mn->obj = obj;
+
+ ret = i915_mmu_notifier_add(mmu, mn);
+ if (ret)
+ goto free_mn;
+
+ obj->userptr.mn = mn;
+ return 0;
+
+free_mn:
+ kfree(mn);
+destroy_mmu:
+ mutex_lock(&obj->base.dev->struct_mutex);
+ if (--mmu->count == 0)
+ __i915_mmu_notifier_destroy(mmu);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return ret;
+}
+
+#else
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+ unsigned flags)
+{
+ if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
+ return -ENODEV;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+#endif
+
+struct get_pages_work {
+ struct work_struct work;
+ struct drm_i915_gem_object *obj;
+ struct task_struct *task;
+};
+
+#if IS_ENABLED(CONFIG_SWIOTLB)
+#define swiotlb_active() swiotlb_nr_tbl()
+#else
+#define swiotlb_active() 0
+#endif
+
+static int
+st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
+{
+ struct scatterlist *sg;
+ int ret, n;
+
+ *st = kmalloc(sizeof(**st), GFP_KERNEL);
+ if (*st == NULL)
+ return -ENOMEM;
+
+ if (swiotlb_active()) {
+ ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ for_each_sg((*st)->sgl, sg, num_pages, n)
+ sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
+ } else {
+ ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
+ 0, num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(*st);
+ *st = NULL;
+ return ret;
+}
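
The two paths in st_set_pages() differ in segment layout: sg_alloc_table_from_pages() coalesces physically contiguous pages into single scatterlist segments, whereas the swiotlb path keeps a fixed one-segment-per-page layout, since swiotlb bounce buffers cannot back arbitrarily large segments. A hedged userspace sketch of the coalescing effect; the pfn array is made up:

/* Hedged sketch: count coalesced segments for a run of page frames. */
#include <stdio.h>

int main(void)
{
	unsigned long pfn[] = { 100, 101, 102, 200, 201, 300 };
	int n = sizeof(pfn) / sizeof(pfn[0]);
	int i, segments = 1;

	for (i = 1; i < n; i++)
		if (pfn[i] != pfn[i - 1] + 1)
			segments++;

	/* coalesced: 3 segments; the swiotlb path would use 6 */
	printf("coalesced=%d per-page=%d\n", segments, n);
	return 0;
}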
+
+static void
+__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
+{
+ struct get_pages_work *work = container_of(_work, typeof(*work), work);
+ struct drm_i915_gem_object *obj = work->obj;
+ struct drm_device *dev = obj->base.dev;
+ const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ int pinned, ret;
+
+ ret = -ENOMEM;
+ pinned = 0;
+
+ pvec = kmalloc(num_pages*sizeof(struct page *),
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (pvec == NULL)
+ pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (pvec != NULL) {
+ struct mm_struct *mm = obj->userptr.mm;
+
+ down_read(&mm->mmap_sem);
+ while (pinned < num_pages) {
+ ret = get_user_pages(work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ num_pages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (obj->userptr.work != &work->work) {
+ ret = 0;
+ } else if (pinned == num_pages) {
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret == 0) {
+ list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+ pinned = 0;
+ }
+ }
+
+ obj->userptr.work = ERR_PTR(ret);
+ obj->userptr.workers--;
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+
+ put_task_struct(work->task);
+ kfree(work);
+}
+
+static int
+i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+{
+ const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ int pinned, ret;
+
+ /* If userspace should engineer that these pages are replaced in
+ * the vma between us binding these pages into the GTT and completion
+ * of rendering... Their loss. If they change the mapping of their
+ * pages they need to create a new bo to point to the new vma.
+ *
+ * However, that still leaves open the possibility of the vma
+ * being copied upon fork. Which falls under the same userspace
+ * synchronisation issue as a regular bo, except that this time
+ * the process may not be expecting that a particular piece of
+ * memory is tied to the GPU.
+ *
+ * Fortunately, we can hook into the mmu_notifier in order to
+ * discard the page references prior to anything nasty happening
+ * to the vma (discard or cloning) which should prevent the more
+ * egregious cases from causing harm.
+ */
+
+ pvec = NULL;
+ pinned = 0;
+ if (obj->userptr.mm == current->mm) {
+ pvec = kmalloc(num_pages*sizeof(struct page *),
+ GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (pvec == NULL) {
+ pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (pvec == NULL)
+ return -ENOMEM;
+ }
+
+ pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
+ !obj->userptr.read_only, pvec);
+ }
+ if (pinned < num_pages) {
+ if (pinned < 0) {
+ ret = pinned;
+ pinned = 0;
+ } else {
+ /* Spawn a worker so that we can acquire the
+ * user pages without holding our mutex. Access
+ * to the user pages requires mmap_sem, and we have
+ * a strict lock ordering of mmap_sem, struct_mutex -
+ * we already hold struct_mutex here and so cannot
+ * call gup without encountering a lock inversion.
+ *
+ * Userspace will keep on repeating the operation
+ * (thanks to EAGAIN) until either we hit the fast
+ * path or the worker completes. If the worker is
+ * cancelled or superseded, the task is still run
+ * but the results are ignored. (This leads to
+ * complications that we may have a stray object
+ * refcount that we need to be wary of when
+ * checking for existing objects during creation.)
+ * If the worker encounters an error, it reports
+ * that error back to this function through
+ * obj->userptr.work = ERR_PTR.
+ */
+ ret = -EAGAIN;
+ if (obj->userptr.work == NULL &&
+ obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
+ struct get_pages_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (work != NULL) {
+ obj->userptr.work = &work->work;
+ obj->userptr.workers++;
+
+ work->obj = obj;
+ drm_gem_object_reference(&obj->base);
+
+ work->task = current;
+ get_task_struct(work->task);
+
+ INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
+ schedule_work(&work->work);
+ } else
+ ret = -ENOMEM;
+ } else {
+ if (IS_ERR(obj->userptr.work)) {
+ ret = PTR_ERR(obj->userptr.work);
+ obj->userptr.work = NULL;
+ }
+ }
+ }
+ } else {
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret == 0) {
+ obj->userptr.work = NULL;
+ pinned = 0;
+ }
+ }
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+ return ret;
+}
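
From the other side of the uAPI, the comment above implies a simple retry loop in userspace: re-issue the operation while it fails with EAGAIN, until the worker has populated the pages or reported an error. A hedged sketch — the wrapper name is made up, and libdrm's drmIoctl() already restarts on EINTR:

/* Hedged sketch of the userspace side of the -EAGAIN protocol. */
#include <errno.h>
#include <sys/ioctl.h>

static int ioctl_retry_eagain(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}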
+
+static void
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(obj->userptr.work != NULL);
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ obj->dirty = 0;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+ struct page *page = sg_page(sg);
+
+ if (obj->dirty)
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ obj->dirty = 0;
+
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static void
+i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_userptr_release__mmu_notifier(obj);
+
+ if (obj->userptr.mm) {
+ mmput(obj->userptr.mm);
+ obj->userptr.mm = NULL;
+ }
+}
+
+static int
+i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+{
+ if (obj->userptr.mn)
+ return 0;
+
+ return i915_gem_userptr_init__mmu_notifier(obj, 0);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+ .dmabuf_export = i915_gem_userptr_dmabuf_export,
+ .get_pages = i915_gem_userptr_get_pages,
+ .put_pages = i915_gem_userptr_put_pages,
+ .release = i915_gem_userptr_release,
+};
+
+/**
+ * Creates a new mm object that wraps some normal memory from the process
+ * context - user memory.
+ *
+ * We impose several restrictions upon the memory being mapped
+ * into the GPU.
+ * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
+ * 2. It cannot overlap any other userptr object in the same address space.
+ * 3. It must be normal system memory, not a pointer into another map of IO
+ * space (e.g. it must not be a GTT mmapping of another object).
+ * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * that is we limit the size to the total size of the GTT.
+ * 5. The bo is marked as being snoopable. The backing pages are left
+ * accessible directly by the CPU, but reads and writes by the GPU may
+ * incur the cost of a snoop (unless you have an LLC architecture).
+ *
+ * Synchronisation between multiple users and the GPU is left to userspace
+ * through the normal set-domain-ioctl. The kernel will enforce that the
+ * GPU relinquishes the VMA before it is returned to the system,
+ * i.e. upon free(), munmap() or process termination. However, the userspace
+ * malloc() library may not immediately relinquish the VMA after free() and
+ * instead reuse it whilst the GPU is still reading and writing to the VMA.
+ * Caveat emptor.
+ *
+ * Also note that the object created here is not currently a "first class"
+ * object, in that several ioctls are banned. These are the CPU access
+ * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
+ * direct access via your pointer rather than use those ioctls.
+ *
+ * If you think this is a good interface to use to pass GPU memory between
+ * drivers, please use dma-buf instead. In fact, wherever possible use
+ * dma-buf instead.
+ */
+int
+i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_userptr *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+ u32 handle;
+
+ if (args->flags & ~(I915_USERPTR_READ_ONLY |
+ I915_USERPTR_UNSYNCHRONIZED))
+ return -EINVAL;
+
+ if (offset_in_page(args->user_ptr | args->user_size))
+ return -EINVAL;
+
+ if (args->user_size > dev_priv->gtt.base.total)
+ return -E2BIG;
+
+ if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
+ (char __user *)(unsigned long)args->user_ptr, args->user_size))
+ return -EFAULT;
+
+ if (args->flags & I915_USERPTR_READ_ONLY) {
+ /* On almost all of the current hw, we cannot tell the GPU that a
+ * page is readonly, so this is just a placeholder in the uAPI.
+ */
+ return -ENODEV;
+ }
+
+ /* Allocate the new object */
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ drm_gem_private_object_init(dev, &obj->base, args->user_size);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ obj->cache_level = I915_CACHE_LLC;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->userptr.ptr = args->user_ptr;
+ obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+
+ /* And keep a pointer to the current->mm for resolving the user pages
+ * at binding. This means that we need to hook into the mmu_notifier
+ * in order to detect if the mmu is destroyed.
+ */
+ ret = -ENOMEM;
+ if ((obj->userptr.mm = get_task_mm(current)))
+ ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+ if (ret == 0)
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int
+i915_gem_init_userptr(struct drm_device *dev)
+{
+#if defined(CONFIG_MMU_NOTIFIER)
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ hash_init(dev_priv->mmu_notifiers);
+#endif
+ return 0;
+}
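
Tying the restrictions above together, here is a hedged userspace sketch of creating a userptr object. The struct layout, flag values and ioctl number are restated locally to keep the example self-contained; they are meant to mirror the uAPI this series adds to include/uapi/drm/i915_drm.h, so treat them as assumptions rather than a substitute for the real header.

/* Hedged sketch: wrap a page-aligned buffer in a userptr object.
 * LOCAL_* definitions below are assumptions mirroring the new uAPI.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>

struct local_i915_gem_userptr {
	uint64_t user_ptr;
	uint64_t user_size;
	uint32_t flags;
#define LOCAL_I915_USERPTR_READ_ONLY      0x1
#define LOCAL_I915_USERPTR_UNSYNCHRONIZED 0x80000000
	uint32_t handle;
};
/* DRM_COMMAND_BASE (0x40) + DRM_I915_GEM_USERPTR (0x33), assumed */
#define LOCAL_IOCTL_I915_GEM_USERPTR \
	_IOWR('d', 0x40 + 0x33, struct local_i915_gem_userptr)

int main(void)
{
	struct local_i915_gem_userptr arg = { 0 };
	void *ptr;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || posix_memalign(&ptr, 4096, 4096))
		return 1;

	arg.user_ptr = (uintptr_t)ptr;   /* must be page aligned */
	arg.user_size = 4096;            /* must be page aligned */

	if (ioctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg))
		perror("userptr");
	else
		printf("handle %u\n", arg.handle);
	return 0;
}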
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 12f1d43b2d68..66cf41765bf9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -42,6 +42,7 @@ static const char *ring_str(int ring)
case VCS: return "bsd";
case BCS: return "blt";
case VECS: return "vebox";
+ case VCS2: return "bsd2";
default: return "";
}
}
@@ -204,6 +205,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
+ err_puts(m, err->userptr ? " userptr" : "");
err_puts(m, err->ring != -1 ? " " : "");
err_puts(m, ring_str(err->ring));
err_puts(m, i915_cache_level_str(err->cache_level));
@@ -257,7 +259,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
}
err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
- err_printf(m, " FADDR: 0x%08x\n", ring->faddr);
+ err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
+ lower_32_bits(ring->faddr));
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
@@ -452,16 +455,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- offset += 16;
- }
+ print_error_obj(m, obj);
}
}
@@ -648,6 +642,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->userptr = obj->userptr.mm != NULL;
err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
}
@@ -752,7 +747,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
static void i915_record_ring_state(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -764,14 +759,14 @@ static void i915_record_ring_state(struct drm_device *dev,
= I915_READ(RING_SYNC_0(ring->mmio_base));
ering->semaphore_mboxes[1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->sync_seqno[0];
- ering->semaphore_seqno[1] = ring->sync_seqno[1];
+ ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
}
if (HAS_VEBOX(dev)) {
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->sync_seqno[2];
+ ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -781,8 +776,10 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_INFO(dev)->gen >= 8) {
+ ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+ }
ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
ering->faddr = I915_READ(DMA_FADD_I8XX);
@@ -828,8 +825,8 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hws = I915_READ(mmio);
}
- ering->cpu_ring_head = ring->head;
- ering->cpu_ring_tail = ring->tail;
+ ering->cpu_ring_head = ring->buffer->head;
+ ering->cpu_ring_tail = ring->buffer->tail;
ering->hangcheck_score = ring->hangcheck.score;
ering->hangcheck_action = ring->hangcheck.action;
@@ -862,7 +859,7 @@ static void i915_record_ring_state(struct drm_device *dev,
}
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
@@ -875,10 +872,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
- ering->ctx = i915_error_object_create_sized(dev_priv,
- obj,
- &dev_priv->gtt.base,
- 1);
+ ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
}
}
@@ -892,7 +886,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+ error->ring[i].pid = -1;
if (ring->dev == NULL)
continue;
@@ -901,7 +897,6 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_record_ring_state(dev, ring, &error->ring[i]);
- error->ring[i].pid = -1;
request = i915_gem_find_active_request(ring);
if (request) {
/* We need to copy these to an anonymous buffer
@@ -936,7 +931,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
error->ring[i].ringbuffer =
- i915_error_ggtt_object_create(dev_priv, ring->obj);
+ i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
if (ring->status_page.obj)
error->ring[i].hws_page =
@@ -1037,7 +1032,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct drm_device *dev = dev_priv->dev;
- int pipe;
/* General organization
* 1. Registers specific to a single generation
@@ -1062,9 +1056,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->gfx_mode = I915_READ(GFX_MODE);
}
- if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
-
/* 2: Registers which belong to multiple generations */
if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
@@ -1088,9 +1079,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
else {
- error->ier = I915_READ(IER);
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+ if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
}
/* 4: Everything else */
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 3c59584161c2..2e0613e26251 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -208,7 +208,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0b99de95593b..267f069765ad 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -80,17 +80,64 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
+/* IIR can theoretically queue up two events. Be paranoid. */
+#define GEN8_IRQ_RESET_NDX(type, which) do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IMR(which)); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+} while (0)
+
+#define GEN5_IRQ_RESET(type) do { \
+ I915_WRITE(type##IMR, 0xffffffff); \
+ POSTING_READ(type##IMR); \
+ I915_WRITE(type##IER, 0); \
+ I915_WRITE(type##IIR, 0xffffffff); \
+ POSTING_READ(type##IIR); \
+ I915_WRITE(type##IIR, 0xffffffff); \
+ POSTING_READ(type##IIR); \
+} while (0)
+
+/*
+ * We should clear IMR at preinstall/uninstall, and just check at postinstall.
+ */
+#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
+ u32 val = I915_READ(reg); \
+ if (val) { \
+ WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
+ (reg), val); \
+ I915_WRITE((reg), 0xffffffff); \
+ POSTING_READ(reg); \
+ I915_WRITE((reg), 0xffffffff); \
+ POSTING_READ(reg); \
+ } \
+} while (0)
+
+#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
+ GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+ I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
+ POSTING_READ(GEN8_##type##_IER(which)); \
+} while (0)
+
+#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
+ GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
+ I915_WRITE(type##IMR, (imr_val)); \
+ I915_WRITE(type##IER, (ier_val)); \
+ POSTING_READ(type##IER); \
+} while (0)
+
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pm.irqs_disabled) {
- WARN(1, "IRQs disabled\n");
- dev_priv->pm.regsave.deimr &= ~mask;
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
return;
- }
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
@@ -104,11 +151,8 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pm.irqs_disabled) {
- WARN(1, "IRQs disabled\n");
- dev_priv->pm.regsave.deimr |= mask;
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
return;
- }
if ((dev_priv->irq_mask & mask) != mask) {
dev_priv->irq_mask |= mask;
@@ -129,13 +173,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pm.irqs_disabled) {
- WARN(1, "IRQs disabled\n");
- dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
- dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
- interrupt_mask);
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
return;
- }
dev_priv->gt_irq_mask &= ~interrupt_mask;
dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
@@ -167,13 +206,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pm.irqs_disabled) {
- WARN(1, "IRQs disabled\n");
- dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
- dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
- interrupt_mask);
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
return;
- }
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
@@ -214,6 +248,46 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
return true;
}
+/**
+ * bdw_update_pm_irq - update GT interrupt 2
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ *
+ * Copied from the snb function, updated with relevant register offsets
+ */
+static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
+ return;
+
+ new_val = dev_priv->pm_irq_mask;
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->pm_irq_mask) {
+ dev_priv->pm_irq_mask = new_val;
+ I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
+ POSTING_READ(GEN8_GT_IMR(2));
+ }
+}
+
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ bdw_update_pm_irq(dev_priv, mask, mask);
+}
+
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ bdw_update_pm_irq(dev_priv, mask, 0);
+}
+
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -232,16 +306,51 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
return true;
}
-static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+void i9xx_check_fifo_underruns(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ for_each_intel_crtc(dev, crtc) {
+ u32 reg = PIPESTAT(crtc->pipe);
+ u32 pipestat;
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ pipestat = I915_READ(reg) & 0xffff0000;
+ if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ continue;
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+
+ DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ }
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ u32 pipestat = I915_READ(reg) & 0xffff0000;
assert_spin_locked(&dev_priv->irq_lock);
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ if (enable) {
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+ } else {
+ if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -258,7 +367,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+ enum pipe pipe,
+ bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
@@ -269,15 +379,12 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
- bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
-
- /* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
- if (!was_enabled &&
- (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
- DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ if (old &&
+ I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -313,14 +420,8 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (dev_priv->pm.irqs_disabled &&
- (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
- WARN(1, "IRQs disabled\n");
- dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
- dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
- interrupt_mask);
+ if (WARN_ON(dev_priv->pm.irqs_disabled))
return;
- }
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
@@ -346,7 +447,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
- bool enable)
+ bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -359,16 +460,12 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
- uint32_t tmp = I915_READ(SERR_INT);
- bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
-
- /* Change the state _after_ we've read out the current one. */
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- if (!was_enabled &&
- (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
- DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
- transcoder_name(pch_transcoder));
+ if (old && I915_READ(SERR_INT) &
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
}
}
}
@@ -387,34 +484,29 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
*
* Returns the previous state of underrun reporting.
*/
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool ret;
+ bool old;
assert_spin_locked(&dev_priv->irq_lock);
- ret = !intel_crtc->cpu_fifo_underrun_disabled;
-
- if (enable == ret)
- goto done;
-
+ old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
- if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
- i9xx_clear_fifo_underrun(dev, pipe);
+ if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN8(dev))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-done:
- return ret;
+ return old;
}
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
@@ -463,7 +555,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
- bool ret;
+ bool old;
/*
* NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
@@ -476,21 +568,16 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = !intel_crtc->pch_fifo_underrun_disabled;
-
- if (enable == ret)
- goto done;
-
+ old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev))
ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
- cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+ cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
-done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- return ret;
+ return old;
}
@@ -503,8 +590,10 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK))
+ if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask))
return;
if ((pipestat & enable_mask) == enable_mask)
@@ -527,8 +616,10 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK))
+ if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask))
return;
if ((pipestat & enable_mask) == 0)
@@ -546,11 +637,17 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
u32 enable_mask = status_mask << 16;
/*
- * On pipe A we don't support the PSR interrupt yet, on pipe B the
- * same bit MBZ.
+ * On pipe A we don't support the PSR interrupt yet,
+ * on pipe B and C the same bit MBZ.
*/
if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
+ /*
+ * On pipe B and C we don't support the PSR interrupt yet, on pipe
+ * A the same bit is for perf counters which we don't use either.
+ */
+ if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+ return 0;
enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
SPRITE0_FLIP_DONE_INT_EN_VLV |
@@ -637,6 +734,56 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
}
}
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ * vblank_start >= 3
+ * vsync_start = vblank_start + 1
+ * vsync_end = vblank_start + 2
+ * vtotal = vblank_start + 3
+ *
+ * start of vblank:
+ * latch double buffered registers
+ * increment frame counter (ctg+)
+ * generate start of vblank interrupt (gen4+)
+ * |
+ * | frame start:
+ * | generate frame start interrupt (aka. vblank interrupt) (gmch)
+ * | may be shifted forward 1-3 extra lines via PIPECONF
+ * | |
+ * | | start of vsync:
+ * | | generate vsync interrupt
+ * | | |
+ * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
+ * . \hs/ . \hs/ \hs/ \hs/ . \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ * | | <----vs-----> |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ * | | |
+ * last visible pixel first visible pixel
+ * | increment frame counter (gen3/4)
+ * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
+ *
+ * x = horizontal active
+ * _ = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ * (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ * of horizontal active on the first line of vertical active
+ */
+
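
The last two summary points are what the pixel-counter path further down in i915_get_crtc_scanoutpos() relies on: with a counter that starts at the first visible pixel, scanline and horizontal position fall out of a divide/modulo by htotal. A hedged sketch with made-up mode values:

/* Hedged sketch of the pixel-counter split used by the scanout
 * position code below.
 */
#include <stdio.h>

int main(void)
{
	int htotal = 2200, position = 1000000; /* example mode/counter */

	printf("scanline=%d x=%d\n", position / htotal, position % htotal);
	return 0;
}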
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
/* Gen2 doesn't have a hardware frame counter */
@@ -651,7 +798,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low, pixel, vbl_start;
+ u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -665,17 +812,28 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
const struct drm_display_mode *mode =
&intel_crtc->config.adjusted_mode;
- vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
} else {
enum transcoder cpu_transcoder = (enum transcoder) pipe;
- u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+ hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
-
- vbl_start *= htotal;
+ if ((I915_READ(PIPECONF(cpu_transcoder)) &
+ PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
}
+ /* Convert to pixel count */
+ vbl_start *= htotal;
+
+ /* Start of vblank event occurs at start of hsync */
+ vbl_start -= htotal - hsync_start;
+
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
@@ -719,24 +877,28 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t status;
- int reg;
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
+ int position, vtotal;
- if (INTEL_INFO(dev)->gen >= 8) {
- status = GEN8_PIPE_VBLANK;
- reg = GEN8_DE_PIPE_ISR(pipe);
- } else if (INTEL_INFO(dev)->gen >= 7) {
- status = DE_PIPE_VBLANK_IVB(pipe);
- reg = DEISR;
- } else {
- status = DE_PIPE_VBLANK(pipe);
- reg = DEISR;
- }
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
- return __raw_i915_read32(dev_priv, reg) & status;
+ if (IS_GEN2(dev))
+ position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ else
+ position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+
+ /*
+ * See update_scanline_offset() for the details on the
+ * scanline_offset adjustment.
+ */
+ return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -748,7 +910,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
int position;
- int vbl_start, vbl_end, htotal, vtotal;
+ int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
unsigned long irqflags;
@@ -760,6 +922,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
vtotal = mode->crtc_vtotal;
vbl_start = mode->crtc_vblank_start;
vbl_end = mode->crtc_vblank_end;
@@ -778,7 +941,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
* following code must not block on uncore.lock.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
+
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
/* Get optional system timestamp before query. */
@@ -789,68 +952,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- if (IS_GEN2(dev))
- position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
- else
- position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
-
- if (HAS_DDI(dev)) {
- /*
- * On HSW HDMI outputs there seems to be a 2 line
- * difference, whereas eDP has the normal 1 line
- * difference that earlier platforms have. External
- * DP is unknown. For now just check for the 2 line
- * difference case on all output types on HSW+.
- *
- * This might misinterpret the scanline counter being
- * one line too far along on eDP, but that's less
- * dangerous than the alternative since that would lead
- * the vblank timestamp code astray when it sees a
- * scanline count before vblank_start during a vblank
- * interrupt.
- */
- in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
- if ((in_vbl && (position == vbl_start - 2 ||
- position == vbl_start - 1)) ||
- (!in_vbl && (position == vbl_end - 2 ||
- position == vbl_end - 1)))
- position = (position + 2) % vtotal;
- } else if (HAS_PCH_SPLIT(dev)) {
- /*
- * The scanline counter increments at the leading edge
- * of hsync, ie. it completely misses the active portion
- * of the line. Fix up the counter at both edges of vblank
- * to get a more accurate picture whether we're in vblank
- * or not.
- */
- in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
- if ((in_vbl && position == vbl_start - 1) ||
- (!in_vbl && position == vbl_end - 1))
- position = (position + 1) % vtotal;
- } else {
- /*
- * ISR vblank status bits don't work the way we'd want
- * them to work on non-PCH platforms (for
- * ilk_pipe_in_vblank_locked()), and there doesn't
- * appear any other way to determine if we're currently
- * in vblank.
- *
- * Instead let's assume that we're already in vblank if
- * we got called from the vblank interrupt and the
- * scanline counter value indicates that we're on the
- * line just prior to vblank start. This should result
- * in the correct answer, unless the vblank interrupt
- * delivery really got delayed for almost exactly one
- * full frame/field.
- */
- if (flags & DRM_CALLED_FROM_VBLIRQ &&
- position == vbl_start - 1) {
- position = (position + 1) % vtotal;
-
- /* Signal this correction as "applied". */
- ret |= 0x8;
- }
- }
+ position = __intel_get_crtc_scanline(intel_crtc);
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -862,6 +964,29 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
vbl_start *= htotal;
vbl_end *= htotal;
vtotal *= htotal;
+
+ /*
+ * In interlaced modes, the pixel counter counts all pixels,
+ * so one field will have htotal more pixels. To keep the
+ * reported position from jumping backwards when the pixel
+ * counter is beyond the length of the shorter field, just
+ * clamp the position to the length of the shorter field. This
+ * matches how the scanline counter based position works since
+ * the scanline counter doesn't count the two half lines.
+ */
+ if (position >= vtotal)
+ position = vtotal - 1;
+
+ /*
+ * Start of vblank interrupt is triggered at start of hsync,
+ * just prior to the first active line of vblank. However we
+ * consider lines to start at the leading edge of horizontal
+ * active. So, should we get here before we've crossed into
+ * the horizontal active of the first line in vblank, we would
+ * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+ * always add htotal-hsync_start to the current pixel position.
+ */
+ position = (position + htotal - hsync_start) % vtotal;
}
/* Get optional system timestamp after query. */
@@ -900,6 +1025,19 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
return ret;
}
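One consequence of the interlace clamp in the pixel-count branch above, with illustrative numbers:

	/* Assume htotal = 2200 and a short field of 562 lines, so
	 * vtotal = 562 * 2200 = 1236400 pixels after the multiply.
	 * In the longer field the raw pixel counter can reach 1236400
	 * and beyond; clamping to vtotal - 1 keeps the reported
	 * position from wrapping back to line 0 in mid-field.
	 */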
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ unsigned long irqflags;
+ int position;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ position = __intel_get_crtc_scanline(crtc);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return position;
+}
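The wrapper takes uncore.lock so callers outside the scanout-position path get the same protection. A hypothetical caller (target_scanline is an assumed variable, not part of this patch):

	/* Busy-wait until the CRTC has scanned past a given line. */
	while (intel_get_crtc_scanline(crtc) < target_scanline)
		cpu_relax();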
+
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
@@ -945,7 +1083,7 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
@@ -990,7 +1128,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
connector->polled == DRM_CONNECTOR_POLL_HPD) {
DRM_INFO("HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
- drm_get_connector_name(connector));
+ connector->name);
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
@@ -998,7 +1136,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- drm_get_connector_name(connector), intel_encoder->hpd_pin);
+ connector->name, intel_encoder->hpd_pin);
}
}
/* if there were no outputs to poll, poll was disabled,
@@ -1073,9 +1211,9 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
}
static void notify_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
- if (ring->obj == NULL)
+ if (!intel_ring_initialized(ring))
return;
trace_i915_gem_request_complete(ring);
@@ -1094,8 +1232,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- /* Make sure not to corrupt PMIMR state used by ringbuffer code */
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ if (IS_BROADWELL(dev_priv->dev))
+ bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ else {
+ /* Make sure not to corrupt PMIMR state used by ringbuffer */
+ snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ }
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
@@ -1292,6 +1434,19 @@ static void snb_gt_irq_handler(struct drm_device *dev,
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
+static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ if ((pm_iir & dev_priv->pm_rps_events) == 0)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ spin_unlock(&dev_priv->irq_lock);
+
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
@@ -1315,18 +1470,32 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
- if (master_ctl & GEN8_GT_VCS1_IRQ) {
+ if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
+ vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+ if (vcs & GT_RENDER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS2]);
I915_WRITE(GEN8_GT_IIR(1), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
+ if (master_ctl & GEN8_GT_PM_IRQ) {
+ tmp = I915_READ(GEN8_GT_IIR(2));
+ if (tmp & dev_priv->pm_rps_events) {
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
+ I915_WRITE(GEN8_GT_IIR(2),
+ tmp & dev_priv->pm_rps_events);
+ } else
+ DRM_ERROR("The master control interrupt lied (PM)!\n");
+ }
+
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
@@ -1549,6 +1718,19 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
+static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_crtc *crtc;
+
+ if (!drm_handle_vblank(dev, pipe))
+ return false;
+
+ crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ wake_up(&crtc->vbl_wait);
+
+ return true;
+}
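The wake_up() pairs with a wait queue that sleepers elsewhere in this series block on; a sketch of the waiting side (assumed shape, the actual waiter is not in this hunk):

	/* e.g. sleep until the next vblank fires on this pipe */
	last = drm_vblank_count(dev, pipe);
	wait_event(crtc->vbl_wait, drm_vblank_count(dev, pipe) != last);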
+
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1578,6 +1760,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
case PIPE_B:
iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
+ case PIPE_C:
+ iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ break;
}
if (iir & iir_bit)
mask |= dev_priv->pipestat_irq_mask[pipe];
@@ -1600,7 +1785,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
for_each_pipe(pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(dev, pipe);
+ intel_pipe_handle_vblank(dev, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
intel_prepare_page_flip(dev, pipe);
@@ -1619,9 +1804,36 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
gmbus_irq_handler(dev);
}
+static void i9xx_hpd_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ if (IS_G4X(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+ }
+
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
+}
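The trailing POSTING_READ enforces the ordering the comment describes; schematically:

	/* 1. ack PORT_HOTPLUG_STAT (the underlying source)
	 * 2. POSTING_READ() flushes that write over the bus
	 * 3. only then does the caller ack IIR
	 * If IIR were cleared first, a source still pending in
	 * PORT_HOTPLUG_STAT would not raise a fresh IIR bit and the
	 * event would be lost.
	 */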
+
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
@@ -1641,19 +1853,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev, iir);
/* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
-
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
-
- if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
-
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1667,6 +1868,40 @@ out:
return ret;
}
+static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl, iir;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (;;) {
+ master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = I915_READ(VLV_IIR);
+
+ if (master_ctl == 0 && iir == 0)
+ break;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+ valleyview_pipestat_irq_handler(dev, iir);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ i9xx_hpd_irq_handler(dev);
+
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(GEN8_MASTER_IRQ);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
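cherryview_irq_handler follows the usual master-interrupt shape; in outline (pseudocode, not the literal driver flow):

	for (;;) {
		read master and leaf interrupt status;
		if (nothing pending)
			break;
		disable master;		/* freeze the interrupt tree */
		handle and ack the leaf sources;
		re-enable master;	/* re-arms and re-evaluates */
	}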
+
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1827,7 +2062,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
for_each_pipe(pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- drm_handle_vblank(dev, pipe);
+ intel_pipe_handle_vblank(dev, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
@@ -1877,7 +2112,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
for_each_pipe(pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
- drm_handle_vblank(dev, pipe);
+ intel_pipe_handle_vblank(dev, pipe);
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
@@ -1899,7 +2134,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2020,9 +2255,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (pipe_iir & GEN8_PIPE_VBLANK)
- drm_handle_vblank(dev, pipe);
+ intel_pipe_handle_vblank(dev, pipe);
- if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+ if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
@@ -2075,7 +2310,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
/*
@@ -2137,6 +2372,14 @@ static void i915_error_work_func(struct work_struct *work)
reset_event);
/*
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+ * simulated reset via debugfs, so get an RPM reference.
+ */
+ intel_runtime_pm_get(dev_priv);
+ /*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
* pending state and not properly drop locks, resulting in
@@ -2146,6 +2389,8 @@ static void i915_error_work_func(struct work_struct *work)
intel_display_handle_reset(dev);
+ intel_runtime_pm_put(dev_priv);
+
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
@@ -2383,10 +2628,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_STATUS);
-
- /* maintain vblank delivery even in deep C-states */
- if (INTEL_INFO(dev)->gen == 3)
- I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2450,9 +2691,6 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (INTEL_INFO(dev)->gen == 3)
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
-
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_STATUS |
PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2498,29 +2736,77 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
}
static u32
-ring_last_seqno(struct intel_ring_buffer *ring)
+ring_last_seqno(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
}
static bool
-ring_idle(struct intel_ring_buffer *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
-static struct intel_ring_buffer *
-semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
+static bool
+ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+{
+ if (INTEL_INFO(dev)->gen >= 8) {
+ /*
+ * FIXME: gen8 semaphore support - currently we don't emit
+ * semaphores on bdw anyway, but this needs to be addressed when
+ * we merge that code.
+ */
+ return false;
+ } else {
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
+ }
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *signaller;
+ int i;
+
+ if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
+ /*
+ * FIXME: gen8 semaphore support - currently we don't emit
+ * semaphores on bdw anyway, but this needs to be addressed when
+ * we merge that code.
+ */
+ return NULL;
+ } else {
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+ for_each_ring(signaller, dev_priv, i) {
+ if (ring == signaller)
+ continue;
+
+ if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+ return signaller;
+ }
+ }
+
+ DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
+ ring->id, ipehr);
+
+ return NULL;
+}
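Why a lookup replaces the old arithmetic: the previous code (visible further down in this hunk) computed

	&dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]

which hard-codes exactly three rings. With VCS2 now handled (see gen8_gt_irq_handler above), the ring set is no longer a fixed cycle of three, so the signaller is instead found by matching the MI_SEMAPHORE_SYNC bits against each ring's semaphore.mbox.wait[] table.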
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
int i;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
- if ((ipehr & ~(0x3 << 16)) !=
- (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+ if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
return NULL;
/*
@@ -2538,10 +2824,10 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
- head &= ring->size - 1;
+ head &= ring->buffer->size - 1;
/* This here seems to blow up */
- cmd = ioread32(ring->virtual_start + head);
+ cmd = ioread32(ring->buffer->virtual_start + head);
if (cmd == ipehr)
break;
@@ -2551,20 +2837,24 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
if (!i)
return NULL;
- *seqno = ioread32(ring->virtual_start + head + 4) + 1;
- return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+ *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
+ return semaphore_wait_to_signaller_ring(ring, ipehr);
}
-static int semaphore_passed(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_ring_buffer *signaller;
+ struct intel_engine_cs *signaller;
u32 seqno, ctl;
- ring->hangcheck.deadlock = true;
+ ring->hangcheck.deadlock++;
signaller = semaphore_waits_for(ring, &seqno);
- if (signaller == NULL || signaller->hangcheck.deadlock)
+ if (signaller == NULL)
+ return -1;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
/* cursory check for an unkickable deadlock */
@@ -2572,20 +2862,26 @@ static int semaphore_passed(struct intel_ring_buffer *ring)
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
- return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
+ if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+ return 1;
+
+ if (signaller->hangcheck.deadlock)
+ return -1;
+
+ return 0;
}
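The return convention, as the logic above implies and the hangcheck caller interprets:

	/*  -1: unkickable deadlock, or the per-ring deadlock counter
	 *      reached I915_NUM_RINGS (pathological recursion guard)
	 *   1: the awaited seqno has already passed; the waiter moves on
	 *   0: still legitimately waiting, not yet declared deadlocked
	 */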
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i)
- ring->hangcheck.deadlock = false;
+ ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2641,7 +2937,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
@@ -2759,57 +3055,63 @@ void i915_queue_hangcheck(struct drm_device *dev)
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
-static void ibx_irq_preinstall(struct drm_device *dev)
+static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_NOP(dev))
return;
- /* south display irq */
- I915_WRITE(SDEIMR, 0xffffffff);
- /*
- * SDEIER is also touched by the interrupt handler to work around missed
- * PCH interrupts. Hence we can't update it after the interrupt handler
- * is enabled - instead we unconditionally enable all PCH interrupt
- * sources here, but then only unmask them as needed with SDEIMR.
- */
+ GEN5_IRQ_RESET(SDE);
+
+ if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ I915_WRITE(SERR_INT, 0xffffffff);
+}
+
+/*
+ * SDEIER is also touched by the interrupt handler to work around missed PCH
+ * interrupts. Hence we can't update it after the interrupt handler is enabled -
+ * instead we unconditionally enable all PCH interrupt sources here, but then
+ * only unmask them as needed with SDEIMR.
+ *
+ * This function needs to be called before interrupts are enabled.
+ */
+static void ibx_irq_pre_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_NOP(dev))
+ return;
+
+ WARN_ON(I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* and GT */
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
-
- if (INTEL_INFO(dev)->gen >= 6) {
- /* and PM */
- I915_WRITE(GEN6_PMIMR, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0x0);
- POSTING_READ(GEN6_PMIER);
- }
+ GEN5_IRQ_RESET(GT);
+ if (INTEL_INFO(dev)->gen >= 6)
+ GEN5_IRQ_RESET(GEN6_PM);
}
/* drm_dma.h hooks
*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(HWSTAM, 0xeffe);
+ I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- POSTING_READ(DEIER);
+ GEN5_IRQ_RESET(DE);
+ if (IS_GEN7(dev))
+ I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_preinstall(dev);
+ gen5_gt_irq_reset(dev);
- ibx_irq_preinstall(dev);
+ ibx_irq_reset(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2827,7 +3129,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- gen5_gt_irq_preinstall(dev);
+ gen5_gt_irq_reset(dev);
I915_WRITE(DPINVGTT, 0xff);
@@ -2841,7 +3143,15 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
}
-static void gen8_irq_preinstall(struct drm_device *dev)
+static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
+{
+ GEN8_IRQ_RESET_NDX(GT, 0);
+ GEN8_IRQ_RESET_NDX(GT, 1);
+ GEN8_IRQ_RESET_NDX(GT, 2);
+ GEN8_IRQ_RESET_NDX(GT, 3);
+}
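GEN5_IRQ_RESET and GEN8_IRQ_RESET_NDX are introduced elsewhere in this series; judging from the open-coded GEN8_IRQ_INIT_NDX they replace (removed just below), a plausible expansion is roughly:

	#define GEN8_IRQ_RESET_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		/* IIR can queue up two events, so clear it twice */ \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)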
+
+static void gen8_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
@@ -2849,43 +3159,44 @@ static void gen8_irq_preinstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
- /* IIR can theoretically queue up two events. Be paranoid */
-#define GEN8_IRQ_INIT_NDX(type, which) do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- } while (0)
-
-#define GEN8_IRQ_INIT(type) do { \
- I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
- POSTING_READ(GEN8_##type##_IMR); \
- I915_WRITE(GEN8_##type##_IER, 0); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
- } while (0)
-
- GEN8_IRQ_INIT_NDX(GT, 0);
- GEN8_IRQ_INIT_NDX(GT, 1);
- GEN8_IRQ_INIT_NDX(GT, 2);
- GEN8_IRQ_INIT_NDX(GT, 3);
+ gen8_gt_irq_reset(dev_priv);
- for_each_pipe(pipe) {
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
- }
+ for_each_pipe(pipe)
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
+ GEN5_IRQ_RESET(GEN8_DE_PORT_);
+ GEN5_IRQ_RESET(GEN8_DE_MISC_);
+ GEN5_IRQ_RESET(GEN8_PCU_);
+
+ ibx_irq_reset(dev);
+}
+
+static void cherryview_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
- GEN8_IRQ_INIT(DE_PORT);
- GEN8_IRQ_INIT(DE_MISC);
- GEN8_IRQ_INIT(PCU);
-#undef GEN8_IRQ_INIT
-#undef GEN8_IRQ_INIT_NDX
+ gen8_gt_irq_reset(dev_priv);
+
+ GEN5_IRQ_RESET(GEN8_PCU_);
POSTING_READ(GEN8_PCU_IIR);
- ibx_irq_preinstall(dev);
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IIR);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -2931,15 +3242,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_NOP(dev))
return;
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- } else {
+ else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
- I915_WRITE(SERR_INT, I915_READ(SERR_INT));
- }
-
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
I915_WRITE(SDEIMR, ~mask);
}
@@ -2965,10 +3273,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
+ GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_INFO(dev)->gen >= 6) {
pm_irqs |= dev_priv->pm_rps_events;
@@ -2977,10 +3282,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_irq_mask = 0xffffffff;
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
- I915_WRITE(GEN6_PMIER, pm_irqs);
- POSTING_READ(GEN6_PMIER);
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
}
}
@@ -2997,8 +3299,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
-
- I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
@@ -3011,11 +3311,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask = ~display_mask;
- /* should always can generate irq */
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask | extra_mask);
- POSTING_READ(DEIER);
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ ibx_irq_pre_postinstall(dev);
+
+ GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3175,21 +3475,16 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
- for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
- u32 tmp = I915_READ(GEN8_GT_IIR(i));
- if (tmp)
- DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
- i, tmp);
- I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
- I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
- }
- POSTING_READ(GEN8_GT_IER(0));
+ for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
+ GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
+
+ dev_priv->pm_irq_mask = 0xffffffff;
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
+ uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3199,25 +3494,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
- for_each_pipe(pipe) {
- u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (tmp)
- DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
- pipe, tmp);
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
- }
- POSTING_READ(GEN8_DE_PIPE_ISR(0));
+ for_each_pipe(pipe)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
+ de_pipe_enables);
- I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
- I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
- POSTING_READ(GEN8_DE_PORT_IER);
+ GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ ibx_irq_pre_postinstall(dev);
+
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
@@ -3229,44 +3518,55 @@ static int gen8_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void gen8_irq_uninstall(struct drm_device *dev)
+static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+ unsigned long irqflags;
int pipe;
- if (!dev_priv)
- return;
+ /*
+ * Leave vblank interrupts masked initially. enable/disable will
+ * toggle them based on usage.
+ */
+ dev_priv->irq_mask = ~enable_mask;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
-#define GEN8_IRQ_FINI_NDX(type, which) do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- } while (0)
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-#define GEN8_IRQ_FINI(type) do { \
- I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER, 0); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
- } while (0)
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
- GEN8_IRQ_FINI_NDX(GT, 0);
- GEN8_IRQ_FINI_NDX(GT, 1);
- GEN8_IRQ_FINI_NDX(GT, 2);
- GEN8_IRQ_FINI_NDX(GT, 3);
+ gen8_gt_irq_postinstall(dev_priv);
- for_each_pipe(pipe) {
- GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
- }
+ I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(GEN8_MASTER_IRQ);
- GEN8_IRQ_FINI(DE_PORT);
- GEN8_IRQ_FINI(DE_MISC);
- GEN8_IRQ_FINI(PCU);
-#undef GEN8_IRQ_FINI
-#undef GEN8_IRQ_FINI_NDX
+ return 0;
+}
- POSTING_READ(GEN8_PCU_IIR);
+static void gen8_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ intel_hpd_irq_uninstall(dev_priv);
+
+ gen8_irq_reset(dev);
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3278,6 +3578,8 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
+ I915_WRITE(VLV_MASTER_IER, 0);
+
intel_hpd_irq_uninstall(dev_priv);
for_each_pipe(pipe)
@@ -3300,35 +3602,67 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
POSTING_READ(VLV_IER);
}
-static void ironlake_irq_uninstall(struct drm_device *dev)
+static void cherryview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
+ I915_WRITE(GEN8_MASTER_IRQ, 0);
+ POSTING_READ(GEN8_MASTER_IRQ);
- I915_WRITE(HWSTAM, 0xffffffff);
+#define GEN8_IRQ_FINI_NDX(type, which) \
+do { \
+ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER(which), 0); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR(which)); \
+ I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+} while (0)
+
+#define GEN8_IRQ_FINI(type) \
+do { \
+ I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+ I915_WRITE(GEN8_##type##_IER, 0); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+ POSTING_READ(GEN8_##type##_IIR); \
+ I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+} while (0)
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- if (IS_GEN7(dev))
- I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+ GEN8_IRQ_FINI_NDX(GT, 0);
+ GEN8_IRQ_FINI_NDX(GT, 1);
+ GEN8_IRQ_FINI_NDX(GT, 2);
+ GEN8_IRQ_FINI_NDX(GT, 3);
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
+ GEN8_IRQ_FINI(PCU);
- if (HAS_PCH_NOP(dev))
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ POSTING_READ(VLV_IIR);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv)
return;
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
- I915_WRITE(SERR_INT, I915_READ(SERR_INT));
+ intel_hpd_irq_uninstall(dev_priv);
+
+ ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device * dev)
@@ -3386,7 +3720,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!drm_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -3410,7 +3744,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
@@ -3571,7 +3905,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!drm_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -3595,7 +3929,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
@@ -3636,16 +3970,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
break;
/* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
-
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- POSTING_READ(PORT_HOTPLUG_STAT);
- }
+ if (I915_HAS_HOTPLUG(dev) &&
+ iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -3832,7 +4159,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
@@ -3879,22 +4206,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
- HOTPLUG_INT_STATUS_G4X :
- HOTPLUG_INT_STATUS_I915);
-
- intel_hpd_irq_handler(dev, hotplug_trigger,
- IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
-
- if (IS_G4X(dev) &&
- (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
- dp_aux_irq_handler(dev);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -3997,7 +4310,7 @@ static void intel_hpd_irq_reenable(unsigned long data)
if (intel_connector->encoder->hpd_pin == i) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->polled = intel_connector->polled;
if (!connector->polled)
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -4045,7 +4358,15 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ dev->driver->irq_handler = cherryview_irq_handler;
+ dev->driver->irq_preinstall = cherryview_irq_preinstall;
+ dev->driver->irq_postinstall = cherryview_irq_postinstall;
+ dev->driver->irq_uninstall = cherryview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4055,7 +4376,7 @@ void intel_irq_init(struct drm_device *dev)
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_GEN8(dev)) {
dev->driver->irq_handler = gen8_irq_handler;
- dev->driver->irq_preinstall = gen8_irq_preinstall;
+ dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
@@ -4063,7 +4384,7 @@ void intel_irq_init(struct drm_device *dev)
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ironlake_enable_vblank;
@@ -4121,57 +4442,20 @@ void intel_hpd_init(struct drm_device *dev)
}
/* Disable interrupts so we can allow runtime PM. */
-void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
+void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
- dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
- dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
- dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
- dev_priv->pm.regsave.gtier = I915_READ(GTIER);
- dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
-
- ironlake_disable_display_irq(dev_priv, 0xffffffff);
- ibx_disable_display_interrupt(dev_priv, 0xffffffff);
- ilk_disable_gt_irq(dev_priv, 0xffffffff);
- snb_disable_pm_irq(dev_priv, 0xffffffff);
+ dev->driver->irq_uninstall(dev);
dev_priv->pm.irqs_disabled = true;
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Restore interrupts so we can recover from runtime PM. */
-void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
+void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
- uint32_t val;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
- val = I915_READ(DEIMR);
- WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
-
- val = I915_READ(SDEIMR);
- WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
-
- val = I915_READ(GTIMR);
- WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
-
- val = I915_READ(GEN6_PMIMR);
- WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
dev_priv->pm.irqs_disabled = false;
-
- ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
- ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
- ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
- snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
- I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ dev->driver->irq_preinstall(dev);
+ dev->driver->irq_postinstall(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d1d7980f0e01..d05a2afa17dc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ struct i915_params i915 __read_mostly = {
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
- .enable_cmd_parser = 0,
+ .enable_cmd_parser = 1,
+ .disable_vtd_wa = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -149,6 +150,9 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
+MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
+
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
- "Enable command parsing (1=enabled, 0=disabled [default])");
+ "Enable command parsing (1=enabled [default], 0=disabled)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c77af69c2d8f..a5bab61bfc00 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,6 +29,8 @@
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
+#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
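These masked-bit helpers carry a write-enable mask in the high 16 bits, so individual bits can be flipped without a read-modify-write; for example (mirroring the INSTPM usage removed from the vblank code above):

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
	/* writes (INSTPM_AGPBUSY_DIS << 16) | INSTPM_AGPBUSY_DIS;
	 * only bits set in the high half are actually updated. */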
@@ -77,13 +79,19 @@
/* Graphics reset regs */
#define I965_GDRST 0xc0 /* PCI config register */
-#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_ENABLE (1<<0)
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define ILK_GRDOM_FULL (0<<1)
+#define ILK_GRDOM_RENDER (1<<1)
+#define ILK_GRDOM_MEDIA (3<<1)
+#define ILK_GRDOM_MASK (3<<1)
+#define ILK_GRDOM_RESET_ENABLE (1<<0)
+
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
#define GEN6_MBC_SNPCR_MASK (3<<21)
@@ -92,6 +100,9 @@
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define VLV_G3DCTL 0x9024
+#define VLV_GSCKGCTL 0x9028
+
#define GEN6_MBCTL 0x0907c
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
@@ -190,6 +201,8 @@
* Memory interface instructions used by the kernel
*/
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define MI_GLOBAL_GTT (1<<22)
#define MI_NOOP MI_INSTR(0, 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
@@ -244,7 +257,8 @@
#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
-#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define MI_SEMAPHORE_SYNC_MASK (3<<16)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -262,13 +276,16 @@
* - One can actually load arbitrarily many arbitrary registers: Simply issue x
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
-#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
-#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
+#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
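The added parentheses around x matter as soon as the argument is an expression:

	MI_LOAD_REGISTER_IMM(n + 1)
	/* old: MI_INSTR(0x22, 2*n + 1 - 1)	wrong length field */
	/* new: MI_INSTR(0x22, 2*(n + 1) - 1)	as intended        */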
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_NOTIFY (1<<8)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
@@ -330,9 +347,12 @@
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
+#define PIPE_CONTROL_MMIO_WRITE (1<<23)
+#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
@@ -347,6 +367,94 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
+#define MI_PREDICATE MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
+#define MI_CLFLUSH MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
+#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
+#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
+#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
+#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+ ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
+
+#define COLOR_BLT ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+
+/*
+ * Registers used only by the command parser
+ */
+#define BCS_SWCTRL 0x22200
+
+#define HS_INVOCATION_COUNT 0x2300
+#define DS_INVOCATION_COUNT 0x2308
+#define IA_VERTICES_COUNT 0x2310
+#define IA_PRIMITIVES_COUNT 0x2318
+#define VS_INVOCATION_COUNT 0x2320
+#define GS_INVOCATION_COUNT 0x2328
+#define GS_PRIMITIVES_COUNT 0x2330
+#define CL_INVOCATION_COUNT 0x2338
+#define CL_PRIMITIVES_COUNT 0x2340
+#define PS_INVOCATION_COUNT 0x2348
+#define PS_DEPTH_COUNT 0x2350
+
+/* There are 4 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
+
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
+
+#define GEN7_3DPRIM_END_OFFSET 0x2420
+#define GEN7_3DPRIM_START_VERTEX 0x2430
+#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
+#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
+#define GEN7_3DPRIM_START_INSTANCE 0x243C
+#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+
+#define OACONTROL 0x2360
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
+ _GEN7_PIPEA_DE_LOAD_SL, \
+ _GEN7_PIPEB_DE_LOAD_SL)
/*
* Reset registers
@@ -370,6 +478,7 @@
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
+#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
@@ -381,9 +490,6 @@
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
-#define PUNIT_OPCODE_REG_READ 6
-#define PUNIT_OPCODE_REG_WRITE 7
-
#define PUNIT_REG_DSPFREQ 0x36
#define DSPFREQSTAT_SHIFT 30
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
@@ -469,16 +575,91 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
-/*
- * DPIO - a special bus for various display related registers to hide behind
+/**
+ * DOC: DPIO
+ *
+ * VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
*
- * DPIO is VLV only.
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY also contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * ie. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * Note: digital port B is DDI0, digital port C is DDI1,
+ * digital port D is DDI2
+ */
+/*
+ * Dual channel PHY (VLV/CHV)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
*
- * Note: digital port B is DDI0, digital pot C is DDI1
+ * Single channel PHY (CHV)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
*/
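In practice the driver reaches these DPIO registers through the IOSF sideband helpers; a minimal sketch, assuming the vlv_dpio_read()/vlv_dpio_write() helpers and dpio_lock from intel_sideband.c:

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch));
	val |= DPIO_PCS_TX_LANE1_RESET;		/* example bit, defined below */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val);
	mutex_unlock(&dev_priv->dpio_lock);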
#define DPIO_DEVFN 0
-#define DPIO_OPCODE_REG_WRITE 1
-#define DPIO_OPCODE_REG_READ 0
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
@@ -555,14 +736,29 @@ enum punit_power_well {
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
+#define _VLV_PCS01_DW0_CH0 0x200
+#define _VLV_PCS23_DW0_CH0 0x400
+#define _VLV_PCS01_DW0_CH1 0x2600
+#define _VLV_PCS23_DW0_CH1 0x2800
+#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
+#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
+
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
+#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+#define _VLV_PCS01_DW1_CH0 0x204
+#define _VLV_PCS23_DW1_CH0 0x404
+#define _VLV_PCS01_DW1_CH1 0x2604
+#define _VLV_PCS23_DW1_CH1 0x2804
+#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
+#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
+
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
@@ -578,6 +774,19 @@ enum punit_power_well {
#define _VLV_PCS_DW9_CH1 0x8424
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+#define _CHV_PCS_DW10_CH0 0x8228
+#define _CHV_PCS_DW10_CH1 0x8428
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
+#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
+
+#define _VLV_PCS01_DW10_CH0 0x0228
+#define _VLV_PCS23_DW10_CH0 0x0428
+#define _VLV_PCS01_DW10_CH1 0x2628
+#define _VLV_PCS23_DW10_CH1 0x2828
+#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
+#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
+
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
@@ -596,14 +805,21 @@ enum punit_power_well {
#define _VLV_TX_DW2_CH0 0x8288
#define _VLV_TX_DW2_CH1 0x8488
+#define DPIO_SWING_MARGIN_SHIFT 16
+#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
+#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
+/* The following bit for CHV phy */
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
#define _VLV_TX_DW4_CH0 0x8290
#define _VLV_TX_DW4_CH1 0x8490
+#define DPIO_SWING_DEEMPH9P5_SHIFT 24
+#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
#define _VLV_TX3_DW4_CH0 0x690
@@ -623,6 +839,73 @@ enum punit_power_well {
#define _VLV_TX_DW14_CH1 0x84b8
#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
+/* CHV dpPhy registers */
+#define _CHV_PLL_DW0_CH0 0x8000
+#define _CHV_PLL_DW0_CH1 0x8180
+#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
+
+#define _CHV_PLL_DW1_CH0 0x8004
+#define _CHV_PLL_DW1_CH1 0x8184
+#define DPIO_CHV_N_DIV_SHIFT 8
+#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
+#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
+
+#define _CHV_PLL_DW2_CH0 0x8008
+#define _CHV_PLL_DW2_CH1 0x8188
+#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
+
+#define _CHV_PLL_DW3_CH0 0x800c
+#define _CHV_PLL_DW3_CH1 0x818c
+#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
+#define DPIO_CHV_FIRST_MOD (0 << 8)
+#define DPIO_CHV_SECOND_MOD (1 << 8)
+#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
+
+#define _CHV_PLL_DW6_CH0 0x8018
+#define _CHV_PLL_DW6_CH1 0x8198
+#define DPIO_CHV_GAIN_CTRL_SHIFT 16
+#define DPIO_CHV_INT_COEFF_SHIFT 8
+#define DPIO_CHV_PROP_COEFF_SHIFT 0
+#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+
+#define _CHV_CMN_DW13_CH0 0x8134
+#define _CHV_CMN_DW0_CH1 0x8080
+#define DPIO_CHV_S1_DIV_SHIFT 21
+#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
+#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
+#define DPIO_CHV_K_DIV_SHIFT 4
+#define DPIO_PLL_FREQLOCK (1 << 1)
+#define DPIO_PLL_LOCK (1 << 0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
+
+#define _CHV_CMN_DW14_CH0 0x8138
+#define _CHV_CMN_DW1_CH1 0x8084
+#define DPIO_AFC_RECAL (1 << 14)
+#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+
+#define CHV_CMN_DW30 0x8178
+#define DPIO_LRC_BYPASS (1 << 3)
+
+#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
+ (lane) * 0x200 + (offset))
+
+#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
+#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
+#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
+#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
+#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
+#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
+#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
+#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
+#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
+#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
+#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
+#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
+#define DPIO_FRC_LATENCY_SHFIT 8
+#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
+#define DPIO_UPAR_SHIFT 30
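
The _TXLANE() macro above linearizes (channel, lane, dword) into a DPIO offset: channel 1 adds 0x2400 and each lane strides 0x200. A minimal sketch of the computation:

    #include <stdio.h>

    #define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
                                       (lane) * 0x200 + (offset))

    int main(void)
    {
        /* CHV_TX_DW2 of channel 1, lane 2: 0x2400 + 2*0x200 + 0x88 */
        printf("0x%x\n", _TXLANE(1, 2, 0x88)); /* prints 0x2888 */
        return 0;
    }
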
/*
* Fence registers
*/
@@ -659,10 +942,14 @@ enum punit_power_well {
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_CTL 0x02020
+#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
+#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
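+
+Per the two masks above, a gen4 page-table base spans 36 bits, with bits [35:32] folded into register bits [7:4]. A hedged sketch of reassembling it (the helper name is illustrative, not from the driver):
+
+    #include <stdint.h>
+
+    static uint64_t pgtbl_base(uint32_t pgtbl_ctl)
+    {
+        uint64_t lo = pgtbl_ctl & 0xfffff000;  /* PGTBL_ADDRESS_LO_MASK */
+        /* HI bits sit 28 places below their value: bit 4 -> bit 32 */
+        uint64_t hi = (uint64_t)(pgtbl_ctl & 0x000000f0) << 28;
+
+        return hi | lo;
+    }
+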
#define PGTBL_ER 0x02024
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
+#define GEN8_BSD2_RING_BASE 0x1c000
#define VEBOX_RING_BASE 0x1a000
#define BLT_RING_BASE 0x22000
#define RING_TAIL(base) ((base)+0x30)
@@ -688,9 +975,20 @@ enum punit_power_well {
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
-#define ARB_MODE 0x04030
+
+#define GEN7_WR_WATERMARK 0x4028
+#define GEN7_GFX_PRIO_CTRL 0x402C
+#define ARB_MODE 0x4030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define GEN7_GFX_PEND_TLB0 0x4034
+#define GEN7_GFX_PEND_TLB1 0x4038
+/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
+#define GEN7_LRA_LIMITS_BASE 0x403C
+#define GEN7_LRA_LIMITS_REG_NUM 13
+#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
+#define GEN7_GFX_MAX_REQ_COUNT 0x4074
+
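
GEN7_LRA_LIMITS_BASE plus GEN7_LRA_LIMITS_REG_NUM describes a run of 13 consecutive dword registers; a capture loop would read them as sketched below (assuming I915_READ is the driver's MMIO read helper with dev_priv in scope):

    u32 lra_limits[13 /* GEN7_LRA_LIMITS_REG_NUM */];
    int i;

    for (i = 0; i < 13; i++)
            lra_limits[i] = I915_READ(0x403C /* GEN7_LRA_LIMITS_BASE */ + i * 4);
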
#define GAMTARBMODE 0x04a08
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
#define ARB_MODE_SWIZZLE_BDW (1<<1)
@@ -725,6 +1023,9 @@ enum punit_power_well {
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+
+#define GEN7_TLB_RD_ADDR 0x4700
+
#if 0
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
@@ -748,6 +1049,7 @@ enum punit_power_well {
#define RING_INSTDONE(base) ((base)+0x6c)
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
#define RING_INSTPM(base) ((base)+0xc0)
#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
@@ -842,21 +1144,26 @@ enum punit_power_well {
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
-#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
#define GFX_REPLAY_MODE (1<<11)
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
#define VLV_DISPLAY_BASE 0x180000
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
+#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
+#define GINT_DIS (1<<22)
#define GCFG_DIS (1<<8)
+#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
@@ -875,7 +1182,7 @@ enum punit_power_well {
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
@@ -956,6 +1263,10 @@ enum punit_power_well {
#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
+#define MI_STATE 0x020e4 /* gen2 only */
+#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
+#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
+
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
@@ -973,6 +1284,7 @@ enum punit_power_well {
#define ECO_FLIP_DONE (1<<0)
#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
+#define RC_OP_FLUSH_ENABLE (1<<0)
#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
@@ -984,6 +1296,7 @@ enum punit_power_well {
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
@@ -1024,24 +1337,43 @@ enum punit_power_well {
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
+
+#define I915_PM_INTERRUPT (1<<31)
+#define I915_ISP_INTERRUPT (1<<22)
+#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
+#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
+#define I915_MIPIB_INTERRUPT (1<<19)
+#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
+#define I915_MASTER_ERROR_INTERRUPT (1<<15)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_MISC_INTERRUPT (1<<11)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_WINVALID_INTERRUPT (1<<1)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
-#define I915_BSD_USER_INTERRUPT (1 << 25)
+#define I915_BSD_USER_INTERRUPT (1<<25)
#define GEN6_BSD_RNCID 0x12198
@@ -1198,6 +1530,7 @@ enum punit_power_well {
#define GMBUS_PORT_SSC 1
#define GMBUS_PORT_VGADDC 2
#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PORT_DPD 6 /* HDMID */
@@ -1239,6 +1572,7 @@ enum punit_power_well {
*/
#define DPLL_A_OFFSET 0x6014
#define DPLL_B_OFFSET 0x6018
+#define CHV_DPLL_C_OFFSET 0x6030
#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
@@ -1273,10 +1607,23 @@ enum punit_power_well {
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
+#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+
+/* Additional CHV pll/phy registers */
+#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
+#define DPLL_PORTD_READY_MASK (0xf)
+#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
+#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
+ ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
+#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
+ ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
+#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
+#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
+
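
Taken together, the helpers above support a read-modify-write bring-up sequence for a CHV PHY; a sketch under the assumption that I915_READ and I915_WRITE are the driver's usual MMIO accessors with dev_priv in scope:

    u32 val = I915_READ(DISPLAY_PHY_CONTROL);

    /* release the common-lane reset for this PHY... */
    I915_WRITE(DISPLAY_PHY_CONTROL, PHY_COM_LANE_RESET_DEASSERT(phy, val));

    /* ...then poll for power-good before touching DPIO registers */
    while (!(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy)))
            cpu_relax();
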
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
* this field (only one bit may be set).
@@ -1317,6 +1664,7 @@ enum punit_power_well {
#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
+#define CHV_DPLL_C_MD_OFFSET 0x603c
#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
@@ -1416,7 +1764,7 @@ enum punit_power_well {
# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
-/**
+/*
* This bit must be set on the 830 to prevent hangs when turning off the
* overlay scaler.
*/
@@ -1436,12 +1784,12 @@ enum punit_power_well {
# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
# define MAG_CLOCK_GATE_DISABLE (1 << 5)
-/** This bit must be unset on 855,865 */
+/* This bit must be unset on 855,865 */
# define MECI_CLOCK_GATE_DISABLE (1 << 4)
# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
# define MEC_CLOCK_GATE_DISABLE (1 << 2)
# define MECO_CLOCK_GATE_DISABLE (1 << 1)
-/** This bit must be set on 855,865. */
+/* This bit must be set on 855,865. */
# define SV_CLOCK_GATE_DISABLE (1 << 0)
# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
@@ -1462,14 +1810,14 @@ enum punit_power_well {
# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
-/** This bit must always be set on 965G/965GM */
+/* This bit must always be set on 965G/965GM */
# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
-/** This bit must always be set on 965G */
+/* This bit must always be set on 965G */
# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
@@ -1494,6 +1842,10 @@ enum punit_power_well {
#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
+
+#define VDECCLK_GATE_D 0x620C /* g4x only */
+#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
+
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
@@ -1513,6 +1865,7 @@ enum punit_power_well {
*/
#define PALETTE_A_OFFSET 0xa000
#define PALETTE_B_OFFSET 0xa800
+#define CHV_PALETTE_C_OFFSET 0xc000
#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
@@ -1535,7 +1888,7 @@ enum punit_power_well {
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
-/** 915-945 and GM965 MCH register controlling DRAM channel access */
+/* 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
@@ -1544,15 +1897,15 @@ enum punit_power_well {
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
-/** Pineview MCH register contains DDR3 setting */
+/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
#define CSHRDDR3CTL_DDR3 (1 << 2)
-/** 965 MCH register controlling DRAM channel configuration */
+/* 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
-/** snb MCH registers for reading the DRAM channel configuration */
+/* snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
@@ -1574,7 +1927,7 @@ enum punit_power_well {
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
-/** snb MCH registers for priority tuning */
+/* snb MCH registers for priority tuning */
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
@@ -2002,6 +2355,7 @@ enum punit_power_well {
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
+#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
@@ -2226,6 +2580,7 @@ enum punit_power_well {
#define GEN3_SDVOC 0x61160
#define GEN4_HDMIB GEN3_SDVOB
#define GEN4_HDMIC GEN3_SDVOC
+#define CHV_HDMID 0x6116C
#define PCH_SDVOB 0xe1140
#define PCH_HDMIB PCH_SDVOB
#define PCH_HDMIC 0xe1150
@@ -2246,7 +2601,7 @@ enum punit_power_well {
#define SDVO_PIPE_B_SELECT (1 << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
-/**
+/*
* 915G/GM SDVO pixel multiplier.
* Programmed value is multiplier - 1, up to 5x.
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
@@ -2286,6 +2641,10 @@ enum punit_power_well {
#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+/* CHV SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+
/* DVO port control */
#define DVOA 0x61120
@@ -2556,65 +2915,65 @@ enum punit_power_well {
/* TV port control */
#define TV_CTL 0x68000
-/** Enables the TV encoder */
+/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
-/** Sources the TV encoder input from pipe B instead of A. */
+/* Sources the TV encoder input from pipe B instead of A. */
# define TV_ENC_PIPEB_SELECT (1 << 30)
-/** Outputs composite video (DAC A only) */
+/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
-/** Outputs SVideo video (DAC B/C) */
+/* Outputs SVideo video (DAC B/C) */
# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
-/** Outputs Component video (DAC A/B/C) */
+/* Outputs Component video (DAC A/B/C) */
# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
-/** Outputs Composite and SVideo (DAC A/B/C) */
+/* Outputs Composite and SVideo (DAC A/B/C) */
# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
# define TV_TRILEVEL_SYNC (1 << 21)
-/** Enables slow sync generation (945GM only) */
+/* Enables slow sync generation (945GM only) */
# define TV_SLOW_SYNC (1 << 20)
-/** Selects 4x oversampling for 480i and 576p */
+/* Selects 4x oversampling for 480i and 576p */
# define TV_OVERSAMPLE_4X (0 << 18)
-/** Selects 2x oversampling for 720p and 1080i */
+/* Selects 2x oversampling for 720p and 1080i */
# define TV_OVERSAMPLE_2X (1 << 18)
-/** Selects no oversampling for 1080p */
+/* Selects no oversampling for 1080p */
# define TV_OVERSAMPLE_NONE (2 << 18)
-/** Selects 8x oversampling */
+/* Selects 8x oversampling */
# define TV_OVERSAMPLE_8X (3 << 18)
-/** Selects progressive mode rather than interlaced */
+/* Selects progressive mode rather than interlaced */
# define TV_PROGRESSIVE (1 << 17)
-/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
+/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
# define TV_PAL_BURST (1 << 16)
-/** Field for setting delay of Y compared to C */
+/* Field for setting delay of Y compared to C */
# define TV_YC_SKEW_MASK (7 << 12)
-/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
# define TV_ENC_SDP_FIX (1 << 11)
-/**
+/*
* Enables a fix for the 915GM only.
*
* Not sure what it does.
*/
# define TV_ENC_C0_FIX (1 << 10)
-/** Bits that must be preserved by software */
+/* Bits that must be preserved by software */
# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
-/** Read-only state that reports all features enabled */
+/* Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)
-/** Read-only state that reports that Macrovision is disabled in hardware*/
+/* Read-only state that reports that Macrovision is disabled in hardware. */
# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
-/** Read-only state that reports that TV-out is disabled in hardware. */
+/* Read-only state that reports that TV-out is disabled in hardware. */
# define TV_FUSE_STATE_DISABLED (2 << 4)
-/** Normal operation */
+/* Normal operation */
# define TV_TEST_MODE_NORMAL (0 << 0)
-/** Encoder test pattern 1 - combo pattern */
+/* Encoder test pattern 1 - combo pattern */
# define TV_TEST_MODE_PATTERN_1 (1 << 0)
-/** Encoder test pattern 2 - full screen vertical 75% color bars */
+/* Encoder test pattern 2 - full screen vertical 75% color bars */
# define TV_TEST_MODE_PATTERN_2 (2 << 0)
-/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+/* Encoder test pattern 3 - full screen horizontal 75% color bars */
# define TV_TEST_MODE_PATTERN_3 (3 << 0)
-/** Encoder test pattern 4 - random noise */
+/* Encoder test pattern 4 - random noise */
# define TV_TEST_MODE_PATTERN_4 (4 << 0)
-/** Encoder test pattern 5 - linear color ramps */
+/* Encoder test pattern 5 - linear color ramps */
# define TV_TEST_MODE_PATTERN_5 (5 << 0)
-/**
+/*
* This test mode forces the DACs to 50% of full output.
*
* This is used for load detection in combination with TVDAC_SENSE_MASK
@@ -2624,35 +2983,35 @@ enum punit_power_well {
#define TV_DAC 0x68004
# define TV_DAC_SAVE 0x00ffff00
-/**
+/*
* Reports that DAC state change logic has reported change (RO).
*
* This gets cleared when TV_DAC_STATE_EN is cleared
*/
# define TVDAC_STATE_CHG (1 << 31)
# define TVDAC_SENSE_MASK (7 << 28)
-/** Reports that DAC A voltage is above the detect threshold */
+/* Reports that DAC A voltage is above the detect threshold */
# define TVDAC_A_SENSE (1 << 30)
-/** Reports that DAC B voltage is above the detect threshold */
+/* Reports that DAC B voltage is above the detect threshold */
# define TVDAC_B_SENSE (1 << 29)
-/** Reports that DAC C voltage is above the detect threshold */
+/* Reports that DAC C voltage is above the detect threshold */
# define TVDAC_C_SENSE (1 << 28)
-/**
+/*
* Enables DAC state detection logic, for load-based TV detection.
*
* The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
* to off, for load detection to work.
*/
# define TVDAC_STATE_CHG_EN (1 << 27)
-/** Sets the DAC A sense value to high */
+/* Sets the DAC A sense value to high */
# define TVDAC_A_SENSE_CTL (1 << 26)
-/** Sets the DAC B sense value to high */
+/* Sets the DAC B sense value to high */
# define TVDAC_B_SENSE_CTL (1 << 25)
-/** Sets the DAC C sense value to high */
+/* Sets the DAC C sense value to high */
# define TVDAC_C_SENSE_CTL (1 << 24)
-/** Overrides the ENC_ENABLE and DAC voltage levels */
+/* Overrides the ENC_ENABLE and DAC voltage levels */
# define DAC_CTL_OVERRIDE (1 << 7)
-/** Sets the slew rate. Must be preserved in software */
+/* Sets the slew rate. Must be preserved in software */
# define ENC_TVDAC_SLEW_FAST (1 << 6)
# define DAC_A_1_3_V (0 << 4)
# define DAC_A_1_1_V (1 << 4)
@@ -2667,7 +3026,7 @@ enum punit_power_well {
# define DAC_C_0_7_V (2 << 0)
# define DAC_C_MASK (3 << 0)
-/**
+/*
* CSC coefficients are stored in a floating point format with 9 bits of
* mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
* where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
@@ -2682,7 +3041,7 @@ enum punit_power_well {
#define TV_CSC_Y2 0x68014
# define TV_BY_MASK 0x07ff0000
# define TV_BY_SHIFT 16
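
The coefficient fields here are 11 bits wide (mask 0x07ff). A decoding sketch, assuming the top 2 bits hold the unsigned exponent n and the low 9 bits the mantissa (the exact field split is not spelled out in this hunk):

    #include <stdint.h>

    static double tv_csc_decode2(uint16_t coeff)
    {
        unsigned int n    = (coeff >> 9) & 0x3;  /* 2**-n, unsigned (assumed) */
        unsigned int mant = coeff & 0x1ff;       /* 9-bit mantissa (assumed) */

        return (double)mant / 512.0 / (double)(1u << n);
    }
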
-/**
+/*
* Y attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -2699,7 +3058,7 @@ enum punit_power_well {
#define TV_CSC_U2 0x6801c
# define TV_BU_MASK 0x07ff0000
# define TV_BU_SHIFT 16
-/**
+/*
* U attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -2716,7 +3075,7 @@ enum punit_power_well {
#define TV_CSC_V2 0x68024
# define TV_BV_MASK 0x07ff0000
# define TV_BV_SHIFT 16
-/**
+/*
* V attenuation for component video.
*
* Stored in 1.9 fixed point.
@@ -2725,74 +3084,74 @@ enum punit_power_well {
# define TV_AV_SHIFT 0
#define TV_CLR_KNOBS 0x68028
-/** 2s-complement brightness adjustment */
+/* 2s-complement brightness adjustment */
# define TV_BRIGHTNESS_MASK 0xff000000
# define TV_BRIGHTNESS_SHIFT 24
-/** Contrast adjustment, as a 2.6 unsigned floating point number */
+/* Contrast adjustment, as a 2.6 unsigned floating point number */
# define TV_CONTRAST_MASK 0x00ff0000
# define TV_CONTRAST_SHIFT 16
-/** Saturation adjustment, as a 2.6 unsigned floating point number */
+/* Saturation adjustment, as a 2.6 unsigned floating point number */
# define TV_SATURATION_MASK 0x0000ff00
# define TV_SATURATION_SHIFT 8
-/** Hue adjustment, as an integer phase angle in degrees */
+/* Hue adjustment, as an integer phase angle in degrees */
# define TV_HUE_MASK 0x000000ff
# define TV_HUE_SHIFT 0
#define TV_CLR_LEVEL 0x6802c
-/** Controls the DAC level for black */
+/* Controls the DAC level for black */
# define TV_BLACK_LEVEL_MASK 0x01ff0000
# define TV_BLACK_LEVEL_SHIFT 16
-/** Controls the DAC level for blanking */
+/* Controls the DAC level for blanking */
# define TV_BLANK_LEVEL_MASK 0x000001ff
# define TV_BLANK_LEVEL_SHIFT 0
#define TV_H_CTL_1 0x68030
-/** Number of pixels in the hsync. */
+/* Number of pixels in the hsync. */
# define TV_HSYNC_END_MASK 0x1fff0000
# define TV_HSYNC_END_SHIFT 16
-/** Total number of pixels minus one in the line (display and blanking). */
+/* Total number of pixels minus one in the line (display and blanking). */
# define TV_HTOTAL_MASK 0x00001fff
# define TV_HTOTAL_SHIFT 0
#define TV_H_CTL_2 0x68034
-/** Enables the colorburst (needed for non-component color) */
+/* Enables the colorburst (needed for non-component color) */
# define TV_BURST_ENA (1 << 31)
-/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+/* Offset of the colorburst from the start of hsync, in pixels minus one. */
# define TV_HBURST_START_SHIFT 16
# define TV_HBURST_START_MASK 0x1fff0000
-/** Length of the colorburst */
+/* Length of the colorburst */
# define TV_HBURST_LEN_SHIFT 0
# define TV_HBURST_LEN_MASK 0x0001fff
#define TV_H_CTL_3 0x68038
-/** End of hblank, measured in pixels minus one from start of hsync */
+/* End of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_END_SHIFT 16
# define TV_HBLANK_END_MASK 0x1fff0000
-/** Start of hblank, measured in pixels minus one from start of hsync */
+/* Start of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_START_SHIFT 0
# define TV_HBLANK_START_MASK 0x0001fff
#define TV_V_CTL_1 0x6803c
-/** XXX */
+/* XXX */
# define TV_NBR_END_SHIFT 16
# define TV_NBR_END_MASK 0x07ff0000
-/** XXX */
+/* XXX */
# define TV_VI_END_F1_SHIFT 8
# define TV_VI_END_F1_MASK 0x00003f00
-/** XXX */
+/* XXX */
# define TV_VI_END_F2_SHIFT 0
# define TV_VI_END_F2_MASK 0x0000003f
#define TV_V_CTL_2 0x68040
-/** Length of vsync, in half lines */
+/* Length of vsync, in half lines */
# define TV_VSYNC_LEN_MASK 0x07ff0000
# define TV_VSYNC_LEN_SHIFT 16
-/** Offset of the start of vsync in field 1, measured in one less than the
+/* Offset of the start of vsync in field 1, measured in one less than the
* number of half lines.
*/
# define TV_VSYNC_START_F1_MASK 0x00007f00
# define TV_VSYNC_START_F1_SHIFT 8
-/**
+/*
* Offset of the start of vsync in field 2, measured in one less than the
* number of half lines.
*/
@@ -2800,17 +3159,17 @@ enum punit_power_well {
# define TV_VSYNC_START_F2_SHIFT 0
#define TV_V_CTL_3 0x68044
-/** Enables generation of the equalization signal */
+/* Enables generation of the equalization signal */
# define TV_EQUAL_ENA (1 << 31)
-/** Length of vsync, in half lines */
+/* Length of vsync, in half lines */
# define TV_VEQ_LEN_MASK 0x007f0000
# define TV_VEQ_LEN_SHIFT 16
-/** Offset of the start of equalization in field 1, measured in one less than
+/* Offset of the start of equalization in field 1, measured in one less than
* the number of half lines.
*/
# define TV_VEQ_START_F1_MASK 0x0007f00
# define TV_VEQ_START_F1_SHIFT 8
-/**
+/*
* Offset of the start of equalization in field 2, measured in one less than
* the number of half lines.
*/
@@ -2818,13 +3177,13 @@ enum punit_power_well {
# define TV_VEQ_START_F2_SHIFT 0
#define TV_V_CTL_4 0x68048
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F1_MASK 0x003f0000
# define TV_VBURST_START_F1_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2832,13 +3191,13 @@ enum punit_power_well {
# define TV_VBURST_END_F1_SHIFT 0
#define TV_V_CTL_5 0x6804c
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F2_MASK 0x003f0000
# define TV_VBURST_START_F2_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2846,13 +3205,13 @@ enum punit_power_well {
# define TV_VBURST_END_F2_SHIFT 0
#define TV_V_CTL_6 0x68050
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F3_MASK 0x003f0000
# define TV_VBURST_START_F3_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2860,13 +3219,13 @@ enum punit_power_well {
# define TV_VBURST_END_F3_SHIFT 0
#define TV_V_CTL_7 0x68054
-/**
+/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F4_MASK 0x003f0000
# define TV_VBURST_START_F4_SHIFT 16
-/**
+/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
@@ -2874,56 +3233,56 @@ enum punit_power_well {
# define TV_VBURST_END_F4_SHIFT 0
#define TV_SC_CTL_1 0x68060
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the first subcarrier phase generation DDA */
# define TV_SC_DDA1_EN (1 << 31)
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the second subcarrier phase generation DDA */
# define TV_SC_DDA2_EN (1 << 30)
-/** Turns on the first subcarrier phase generation DDA */
+/* Turns on the third subcarrier phase generation DDA */
# define TV_SC_DDA3_EN (1 << 29)
-/** Sets the subcarrier DDA to reset frequency every other field */
+/* Sets the subcarrier DDA to reset frequency every other field */
# define TV_SC_RESET_EVERY_2 (0 << 24)
-/** Sets the subcarrier DDA to reset frequency every fourth field */
+/* Sets the subcarrier DDA to reset frequency every fourth field */
# define TV_SC_RESET_EVERY_4 (1 << 24)
-/** Sets the subcarrier DDA to reset frequency every eighth field */
+/* Sets the subcarrier DDA to reset frequency every eighth field */
# define TV_SC_RESET_EVERY_8 (2 << 24)
-/** Sets the subcarrier DDA to never reset the frequency */
+/* Sets the subcarrier DDA to never reset the frequency */
# define TV_SC_RESET_NEVER (3 << 24)
-/** Sets the peak amplitude of the colorburst.*/
+/* Sets the peak amplitude of the colorburst. */
# define TV_BURST_LEVEL_MASK 0x00ff0000
# define TV_BURST_LEVEL_SHIFT 16
-/** Sets the increment of the first subcarrier phase generation DDA */
+/* Sets the increment of the first subcarrier phase generation DDA */
# define TV_SCDDA1_INC_MASK 0x00000fff
# define TV_SCDDA1_INC_SHIFT 0
#define TV_SC_CTL_2 0x68064
-/** Sets the rollover for the second subcarrier phase generation DDA */
+/* Sets the rollover for the second subcarrier phase generation DDA */
# define TV_SCDDA2_SIZE_MASK 0x7fff0000
# define TV_SCDDA2_SIZE_SHIFT 16
-/** Sets the increent of the second subcarrier phase generation DDA */
+/* Sets the increment of the second subcarrier phase generation DDA */
# define TV_SCDDA2_INC_MASK 0x00007fff
# define TV_SCDDA2_INC_SHIFT 0
#define TV_SC_CTL_3 0x68068
-/** Sets the rollover for the third subcarrier phase generation DDA */
+/* Sets the rollover for the third subcarrier phase generation DDA */
# define TV_SCDDA3_SIZE_MASK 0x7fff0000
# define TV_SCDDA3_SIZE_SHIFT 16
-/** Sets the increent of the third subcarrier phase generation DDA */
+/* Sets the increment of the third subcarrier phase generation DDA */
# define TV_SCDDA3_INC_MASK 0x00007fff
# define TV_SCDDA3_INC_SHIFT 0
#define TV_WIN_POS 0x68070
-/** X coordinate of the display from the start of horizontal active */
+/* X coordinate of the display from the start of horizontal active */
# define TV_XPOS_MASK 0x1fff0000
# define TV_XPOS_SHIFT 16
-/** Y coordinate of the display from the start of vertical active (NBR) */
+/* Y coordinate of the display from the start of vertical active (NBR) */
# define TV_YPOS_MASK 0x00000fff
# define TV_YPOS_SHIFT 0
#define TV_WIN_SIZE 0x68074
-/** Horizontal size of the display window, measured in pixels*/
+/* Horizontal size of the display window, measured in pixels */
# define TV_XSIZE_MASK 0x1fff0000
# define TV_XSIZE_SHIFT 16
-/**
+/*
* Vertical size of the display window, measured in pixels.
*
* Must be even for interlaced modes.
@@ -2932,28 +3291,28 @@ enum punit_power_well {
# define TV_YSIZE_SHIFT 0
#define TV_FILTER_CTL_1 0x68080
-/**
+/*
* Enables automatic scaling calculation.
*
* If set, the rest of the registers are ignored, and the calculated values can
* be read back from the register.
*/
# define TV_AUTO_SCALE (1 << 31)
-/**
+/*
* Disables the vertical filter.
*
* This is required on modes more than 1024 pixels wide */
# define TV_V_FILTER_BYPASS (1 << 29)
-/** Enables adaptive vertical filtering */
+/* Enables adaptive vertical filtering */
# define TV_VADAPT (1 << 28)
# define TV_VADAPT_MODE_MASK (3 << 26)
-/** Selects the least adaptive vertical filtering mode */
+/* Selects the least adaptive vertical filtering mode */
# define TV_VADAPT_MODE_LEAST (0 << 26)
-/** Selects the moderately adaptive vertical filtering mode */
+/* Selects the moderately adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MODERATE (1 << 26)
-/** Selects the most adaptive vertical filtering mode */
+/* Selects the most adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MOST (3 << 26)
-/**
+/*
* Sets the horizontal scaling factor.
*
* This should be the fractional part of the horizontal scaling factor divided
@@ -2965,14 +3324,14 @@ enum punit_power_well {
# define TV_HSCALE_FRAC_SHIFT 0
#define TV_FILTER_CTL_2 0x68084
-/**
+/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
*/
# define TV_VSCALE_INT_MASK 0x00038000
# define TV_VSCALE_INT_SHIFT 15
-/**
+/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* \sa TV_VSCALE_INT_MASK
@@ -2981,7 +3340,7 @@ enum punit_power_well {
# define TV_VSCALE_FRAC_SHIFT 0
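
The formula above packs naturally into 3.15 fixed point; a sketch with the masks restated numerically (TV_VSCALE_FRAC_MASK assumed to be 0x00007fff, cut off by this hunk):

    #include <stdint.h>

    static uint32_t tv_vscale_3_15(uint32_t src_h, uint32_t dest_h,
                                   uint32_t interlace)
    {
        /* TV_VSCALE = (src height - 1) / ((interlace * dest height) - 1) */
        uint32_t v = (uint32_t)(((uint64_t)(src_h - 1) << 15) /
                                (interlace * dest_h - 1));

        return v & (0x00038000 | 0x00007fff);  /* INT | FRAC fields */
    }
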
#define TV_FILTER_CTL_3 0x68088
-/**
+/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
@@ -2990,7 +3349,7 @@ enum punit_power_well {
*/
# define TV_VSCALE_IP_INT_MASK 0x00038000
# define TV_VSCALE_IP_INT_SHIFT 15
-/**
+/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
@@ -3002,26 +3361,26 @@ enum punit_power_well {
#define TV_CC_CONTROL 0x68090
# define TV_CC_ENABLE (1 << 31)
-/**
+/*
* Specifies which field to send the CC data in.
*
* CC data is usually sent in field 0.
*/
# define TV_CC_FID_MASK (1 << 27)
# define TV_CC_FID_SHIFT 27
-/** Sets the horizontal position of the CC data. Usually 135. */
+/* Sets the horizontal position of the CC data. Usually 135. */
# define TV_CC_HOFF_MASK 0x03ff0000
# define TV_CC_HOFF_SHIFT 16
-/** Sets the vertical position of the CC data. Usually 21 */
+/* Sets the vertical position of the CC data. Usually 21 */
# define TV_CC_LINE_MASK 0x0000003f
# define TV_CC_LINE_SHIFT 0
#define TV_CC_DATA 0x68094
# define TV_CC_RDY (1 << 31)
-/** Second word of CC data to be transmitted. */
+/* Second word of CC data to be transmitted. */
# define TV_CC_DATA_2_MASK 0x007f0000
# define TV_CC_DATA_2_SHIFT 16
-/** First word of CC data to be transmitted. */
+/* First word of CC data to be transmitted. */
# define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0
@@ -3043,6 +3402,8 @@ enum punit_power_well {
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
+#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
+#define DP_PIPE_MASK_CHV (3 << 16)
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -3090,32 +3451,32 @@ enum punit_power_well {
#define DP_PLL_FREQ_160MHZ (1 << 16)
#define DP_PLL_FREQ_MASK (3 << 16)
-/** locked once port is enabled */
+/* locked once port is enabled */
#define DP_PORT_REVERSAL (1 << 15)
/* eDP */
#define DP_PLL_ENABLE (1 << 14)
-/** sends the clock on lane 15 of the PEG for debug */
+/* sends the clock on lane 15 of the PEG for debug */
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
#define DP_SCRAMBLING_DISABLE (1 << 12)
#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-/** limit RGB values to avoid confusing TVs */
+/* limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
-/** Turn on the audio link */
+/* Turn on the audio link */
#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-/** vs and hs sync polarity */
+/* vs and hs sync polarity */
#define DP_SYNC_VS_HIGH (1 << 4)
#define DP_SYNC_HS_HIGH (1 << 3)
-/** A fantasy */
+/* A fantasy */
#define DP_DETECTED (1 << 2)
-/** The aux channel provides a way to talk to the
+/* The aux channel provides a way to talk to the
* signal sink for DDC etc. Max packet size supported
* is 20 bytes in each direction, hence the 5 fixed
* data registers
@@ -3258,6 +3619,7 @@ enum punit_power_well {
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
+#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
@@ -3276,6 +3638,7 @@ enum punit_power_well {
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
@@ -3287,8 +3650,10 @@ enum punit_power_well {
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
@@ -3296,6 +3661,7 @@ enum punit_power_well {
#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
@@ -3304,20 +3670,25 @@ enum punit_power_well {
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define PIPE_HBLANK_INT_STATUS (1UL<<0)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
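
The two masks above encode a fixed convention: every status bit in the low half of PIPESTAT has its interrupt-enable counterpart 16 bits higher. A one-line sketch of the mapping (the macro name is illustrative):

    /* e.g. PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) -> enable bit (1UL<<17) */
    #define PIPESTAT_ENABLE_BIT(status_bit) ((status_bit) << 16)
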
-#define PIPE_A_OFFSET 0x70000
-#define PIPE_B_OFFSET 0x71000
-#define PIPE_C_OFFSET 0x72000
+#define PIPE_A_OFFSET 0x70000
+#define PIPE_B_OFFSET 0x71000
+#define PIPE_C_OFFSET 0x72000
+#define CHV_PIPE_C_OFFSET 0x74000
/*
* There's actually no pipe EDP. Some pipe registers have
* simply shifted from the pipe to the transcoder, while
@@ -3355,14 +3726,25 @@ enum punit_power_well {
#define SPRITED_FLIP_DONE_INT_EN (1<<26)
#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
#define PLANEB_FLIP_DONE_INT_EN (1<<24)
+#define PIPE_PSR_INT_EN (1<<22)
#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
-
-#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
+#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
+#define PIPEC_HLINE_INT_EN (1<<12)
+#define PIPEC_VBLANK_INT_EN (1<<11)
+#define SPRITEF_FLIPDONE_INT_EN (1<<10)
+#define SPRITEE_FLIPDONE_INT_EN (1<<9)
+#define PLANEC_FLIPDONE_INT_EN (1<<8)
+
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
+#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
+#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
+#define PLANEC_INVALID_GTT_INT_EN (1<<25)
+#define CURSORC_INVALID_GTT_INT_EN (1<<24)
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@@ -3372,6 +3754,11 @@ enum punit_power_well {
#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
#define PLANEA_INVALID_GTT_INT_EN (1<<16)
#define DPINVGTT_EN_MASK 0xff0000
+#define DPINVGTT_EN_MASK_CHV 0xfff0000
+#define SPRITEF_INVALID_GTT_STATUS (1<<11)
+#define SPRITEE_INVALID_GTT_STATUS (1<<10)
+#define PLANEC_INVALID_GTT_STATUS (1<<9)
+#define CURSORC_INVALID_GTT_STATUS (1<<8)
#define CURSORB_INVALID_GTT_STATUS (1<<7)
#define CURSORA_INVALID_GTT_STATUS (1<<6)
#define SPRITED_INVALID_GTT_STATUS (1<<5)
@@ -3381,6 +3768,7 @@ enum punit_power_well {
#define SPRITEA_INVALID_GTT_STATUS (1<<1)
#define PLANEA_INVALID_GTT_STATUS (1<<0)
#define DPINVGTT_STATUS_MASK 0xff
+#define DPINVGTT_STATUS_MASK_CHV 0xfff
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -3420,14 +3808,43 @@ enum punit_power_well {
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
+#define DDL_SPRITEB_PRECISION_32 (1<<23)
+#define DDL_SPRITEB_PRECISION_16 (0<<23)
+#define DDL_SPRITEB_SHIFT 16
+#define DDL_SPRITEA_PRECISION_32 (1<<15)
+#define DDL_SPRITEA_PRECISION_16 (0<<15)
+#define DDL_SPRITEA_SHIFT 8
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define DDL_PLANEA_SHIFT 0
+
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
+#define DDL_SPRITED_PRECISION_32 (1<<23)
+#define DDL_SPRITED_PRECISION_16 (0<<23)
+#define DDL_SPRITED_SHIFT 16
+#define DDL_SPRITEC_PRECISION_32 (1<<15)
+#define DDL_SPRITEC_PRECISION_16 (0<<15)
+#define DDL_SPRITEC_SHIFT 8
#define DDL_PLANEB_PRECISION_32 (1<<7)
#define DDL_PLANEB_PRECISION_16 (0<<7)
+#define DDL_PLANEB_SHIFT 0
+
+#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
+#define DDL_CURSORC_PRECISION_32 (1<<31)
+#define DDL_CURSORC_PRECISION_16 (0<<31)
+#define DDL_CURSORC_SHIFT 24
+#define DDL_SPRITEF_PRECISION_32 (1<<23)
+#define DDL_SPRITEF_PRECISION_16 (0<<23)
+#define DDL_SPRITEF_SHIFT 16
+#define DDL_SPRITEE_PRECISION_32 (1<<15)
+#define DDL_SPRITEE_PRECISION_16 (0<<15)
+#define DDL_SPRITEE_SHIFT 8
+#define DDL_PLANEC_PRECISION_32 (1<<7)
+#define DDL_PLANEC_PRECISION_16 (0<<7)
+#define DDL_PLANEC_SHIFT 0
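
Each VLV_DDLn plane field pairs a drain-latency value with a precision select one bit above it (the 7-bit value width is an assumption here). A packing sketch with an illustrative helper name:

    static u32 ddl_field(u32 drain_latency, bool precision_32, int shift)
    {
        u32 val = (drain_latency & 0x7f) << shift;

        if (precision_32)
                val |= 1 << (shift + 7);  /* DDL_*_PRECISION_32 */
        return val;
    }
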
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -3535,12 +3952,13 @@ enum punit_power_well {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040)
-#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044)
-#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
+#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
/* Cursor A & B regs */
-#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080)
+#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -3567,28 +3985,34 @@ enum punit_power_well {
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
-#define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084)
-#define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088)
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0)
-#define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4)
-#define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8)
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
-#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
-#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
+#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
+ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+
+#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
+#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
+#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
-#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
-#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
-#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+#define CURSOR_A_OFFSET 0x70080
+#define CURSOR_B_OFFSET 0x700c0
+#define CHV_CURSOR_C_OFFSET 0x700e0
+#define IVB_CURSOR_B_OFFSET 0x71080
+#define IVB_CURSOR_C_OFFSET 0x72080
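
The _CURSOR2() macro rebases pipe-A-relative register offsets through a per-platform cursor_offsets[] table. A worked check with the constants above (hypothetical table wiring, display_mmio_offset taken as 0):

    #include <stdio.h>
    #include <stdint.h>

    static const uint32_t cursor_offsets[] = {
        0x70080,  /* PIPE_A: CURSOR_A_OFFSET */
        0x71080,  /* PIPE_B: IVB_CURSOR_B_OFFSET */
    };

    int main(void)
    {
        int pipe = 1;
        /* CURCNTR(pipe B) = offs[B] - offs[A] + _CURACNTR = 0x71080 */
        printf("0x%x\n", cursor_offsets[pipe] - cursor_offsets[0] + 0x70080);
        return 0;
    }
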
/* Display A control */
#define _DSPACNTR 0x70180
@@ -4093,6 +4517,7 @@ enum punit_power_well {
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
#define GEN8_GT_BCS_IRQ (1<<1)
@@ -4120,7 +4545,7 @@ enum punit_power_well {
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
-#define GEN8_PIPE_FLIP_DONE (1 << 4)
+#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
@@ -4832,6 +5257,8 @@ enum punit_power_well {
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
+#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
+#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -4888,6 +5315,8 @@ enum punit_power_well {
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+#define VLV_PMWGICZ 0x1300a4
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
@@ -4896,15 +5325,22 @@ enum punit_power_well {
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
+#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
+#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
+#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+
#define VLV_GTLC_PW_STATUS 0x130094
-#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
-#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
+#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
+#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
+#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
+#define VLV_SPAREG2H 0xA194
#define GTFIFODBG 0x120000
#define GT_FIFO_SBDROPERR (1<<6)
@@ -4924,6 +5360,7 @@ enum punit_power_well {
#define HSW_EDRAM_PRESENT 0x120010
#define GEN6_UCGCTL1 0x9400
+# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -4934,12 +5371,19 @@ enum punit_power_well {
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN6_UCGCTL3 0x9408
+
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN6_RCGCTL1 0x9410
+#define GEN6_RCGCTL2 0x9414
+#define GEN6_RSTCTL 0x9420
+
#define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4992,6 +5436,9 @@ enum punit_power_well {
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RPDEUHWTC 0xA080
+#define GEN6_RPDEUC 0xA084
+#define GEN6_RPDEUCSW 0xA088
#define GEN6_RC_STATE 0xA094
#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
@@ -4999,11 +5446,15 @@ enum punit_power_well {
#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RCUBMABDTMR 0xA0B0
#define GEN6_RC1e_THRESHOLD 0xA0B4
#define GEN6_RC6_THRESHOLD 0xA0B8
#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define VLV_RCEDATA 0xA0BC
#define GEN6_RC6pp_THRESHOLD 0xA0C0
#define GEN6_PMINTRMSK 0xA168
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define VLV_PWRDWNUPCTL 0xA294
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
@@ -5020,6 +5471,9 @@ enum punit_power_well {
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN7_GT_SCRATCH_BASE 0x4F100
+#define GEN7_GT_SCRATCH_REG_NUM 8
+
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define VLV_GFX_CLK_STATUS_BIT (1<<3)
#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
@@ -5030,6 +5484,9 @@ enum punit_power_well {
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
+#define VLV_GT_RENDER_RC6 0x138108
+#define VLV_GT_MEDIA_RC6 0x13810C
+
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 56785e8fb2eb..043123c77a1f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -328,8 +328,6 @@ int i915_save_state(struct drm_device *dev)
}
}
- intel_disable_gt_powersave(dev);
-
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 9c57029f6f4b..86ce39aad0ff 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -186,7 +186,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
@@ -263,6 +263,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ intel_runtime_pm_get(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
@@ -273,6 +275,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
}
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 23c26f1f8b37..f5aa0067755a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
+#include "intel_drv.h"
#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
@@ -14,6 +15,80 @@
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace
+/* pipe updates */
+
+TRACE_EVENT(i915_pipe_update_start,
+ TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
+ TP_ARGS(crtc, min, max),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_vblank_evaded,
+ TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
+ TP_ARGS(crtc, min, max, frame),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = frame;
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(i915_pipe_update_end,
+ TP_PROTO(struct intel_crtc *crtc, u32 frame),
+ TP_ARGS(crtc, frame),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = frame;
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
@@ -251,8 +326,8 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct intel_ring_buffer *from,
- struct intel_ring_buffer *to,
+ TP_PROTO(struct intel_engine_cs *from,
+ struct intel_engine_cs *to,
u32 seqno),
TP_ARGS(from, to, seqno),
@@ -277,7 +352,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
@@ -300,7 +375,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
);
TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+ TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
@@ -323,7 +398,7 @@ TRACE_EVENT(i915_gem_ring_flush,
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
@@ -343,12 +418,12 @@ DECLARE_EVENT_CLASS(i915_gem_request,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_complete,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
@@ -368,12 +443,12 @@ TRACE_EVENT(i915_gem_request_complete,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
@@ -402,12 +477,12 @@ TRACE_EVENT(i915_gem_request_wait_begin,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
@@ -424,12 +499,12 @@ DECLARE_EVENT_CLASS(i915_ring,
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring)
);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index aff4a113cda3..827498e081df 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -49,13 +49,19 @@ find_section(struct bdb_header *bdb, int section_id)
total = bdb->bdb_size;
/* walk the sections looking for section_id */
- while (index < total) {
+ while (index + 3 < total) {
current_id = *(base + index);
index++;
+
current_size = *((u16 *)(base + index));
index += 2;
+
+ if (index + current_size > total)
+ return NULL;
+
if (current_id == section_id)
return base + index;
+
index += current_size;
}
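
The loop above walks records laid out as a one-byte section id, an unaligned little-endian 16-bit payload size, and then the payload; the added bounds checks ensure both the 3-byte header and the payload fit inside bdb_size. A sketch of the record shape (struct name is illustrative):

    struct bdb_section_record {
        u8  id;      /* matched against section_id */
        u16 size;    /* payload bytes that follow (unaligned, LE) */
        u8  data[];  /* payload; find_section() returns a pointer here */
    } __packed;
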
@@ -206,7 +212,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int i, downclock;
+ int i, downclock, drrs_mode;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -218,6 +224,28 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
panel_type = lvds_options->panel_type;
+ drrs_mode = (lvds_options->dps_panel_type_bits
+ >> (panel_type * 2)) & MODE_MASK;
+ /*
+ * VBT has static DRRS = 0 and seamless DRRS = 2.
+	 * The code below adjusts vbt.drrs_type to match the
+	 * enum drrs_support_type used by the driver.
+ */
+ switch (drrs_mode) {
+ case 0:
+ dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+ DRM_DEBUG_KMS("DRRS supported mode is static\n");
+ break;
+ case 2:
+ dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+ DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+ break;
+ default:
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+ break;
+ }
+
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
return;
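
The lookup above pulls a 2-bit field out of dps_panel_type_bits, two bits per panel, and maps the VBT encoding (0 = static, 2 = seamless) onto the driver enum. A standalone sketch with simplified enum names (the driver's are STATIC_DRRS_SUPPORT and friends):

#include <stdint.h>
#include <stdio.h>

enum drrs_type { DRRS_NOT_SUPPORTED, STATIC_DRRS, SEAMLESS_DRRS };

static enum drrs_type decode_drrs(uint32_t dps_panel_type_bits,
                                  int panel_type)
{
    /* each panel owns two bits; MODE_MASK in the header is 0x3 */
    unsigned mode = (dps_panel_type_bits >> (panel_type * 2)) & 0x3;

    switch (mode) {
    case 0:  return STATIC_DRRS;
    case 2:  return SEAMLESS_DRRS;
    default: return DRRS_NOT_SUPPORTED;
    }
}

int main(void)
{
    /* panel 3's field is bits 7:6; 0x80 >> 6 == 2, i.e. seamless */
    printf("%d\n", decode_drrs(0x80, 3));
    return 0;
}
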
@@ -287,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
- /* Err to enabling backlight if no backlight block. */
- dev_priv->vbt.backlight.present = true;
-
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -526,6 +551,16 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
+
+ DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+	 * This is because the VBT encodes static DRRS as 0, and
+	 * "DRRS not supported" is signalled separately by
+	 * driver->drrs_enabled = false.
+ */
+ if (!driver->drrs_enabled)
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
}
static void
@@ -628,19 +663,221 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
}
+static u8 *goto_next_sequence(u8 *data, int *size)
+{
+ u16 len;
+ int tmp = *size;
+
+ if (--tmp < 0)
+ return NULL;
+
+ /* goto first element */
+ data++;
+ while (1) {
+ switch (*data) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ /*
+ * skip by this element payload size
+ * skip elem id, command flag and data type
+ */
+ tmp -= 5;
+ if (tmp < 0)
+ return NULL;
+
+ data += 3;
+ len = *((u16 *)data);
+
+ tmp -= len;
+ if (tmp < 0)
+ return NULL;
+
+ /* skip by len */
+ data = data + 2 + len;
+ break;
+ case MIPI_SEQ_ELEM_DELAY:
+ /* skip by elem id, and delay is 4 bytes */
+ tmp -= 5;
+ if (tmp < 0)
+ return NULL;
+
+ data += 5;
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ tmp -= 3;
+ if (tmp < 0)
+ return NULL;
+
+ data += 3;
+ break;
+ default:
+ DRM_ERROR("Unknown element\n");
+ return NULL;
+ }
+
+		/* end of sequence? */
+ if (*data == 0)
+ break;
+ }
+
+ /* goto next sequence or end of block byte */
+ if (--tmp < 0)
+ return NULL;
+
+ data++;
+
+ /* update amount of data left for the sequence block to be parsed */
+ *size = tmp;
+ return data;
+}
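
goto_next_sequence() is a cursor over a byte stream of variable-length elements; the sizes it skips come straight from the code above: SEND_PKT is a 3-byte header plus a u16 length and payload, DELAY is the id plus 4 bytes, GPIO is the id plus 2 bytes, and a 0x00 element id terminates the sequence. A user-space re-statement of the same logic (names and the byte-wise length read are mine, not the driver's):

#include <stdint.h>
#include <stddef.h>

enum { ELEM_SEND_PKT = 1, ELEM_DELAY = 2, ELEM_GPIO = 3 };

static const uint8_t *next_sequence(const uint8_t *data, int *size)
{
    int tmp = *size;

    if (--tmp < 0)
        return NULL;
    data++;                       /* skip the sequence id byte */

    while (1) {
        uint16_t len;

        switch (*data) {
        case ELEM_SEND_PKT:
            if ((tmp -= 5) < 0)   /* id + flag + type + u16 len */
                return NULL;
            len = data[3] | (data[4] << 8);
            if ((tmp -= len) < 0)
                return NULL;
            data += 5 + len;
            break;
        case ELEM_DELAY:
            if ((tmp -= 5) < 0)   /* id + 4-byte delay value */
                return NULL;
            data += 5;
            break;
        case ELEM_GPIO:
            if ((tmp -= 3) < 0)   /* id + 2 bytes */
                return NULL;
            data += 3;
            break;
        default:
            return NULL;          /* unknown element */
        }

        if (*data == 0)           /* end of this sequence */
            break;
    }

    if (--tmp < 0)                /* consume the terminating 0x00 */
        return NULL;
    *size = tmp;
    return data + 1;              /* next sequence, or end-of-block byte */
}
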
+
static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
- struct bdb_mipi *mipi;
+ struct bdb_mipi_config *start;
+ struct bdb_mipi_sequence *sequence;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u8 *data, *seq_data;
+ int i, panel_id, seq_size;
+ u16 block_size;
+
+ /* parse MIPI blocks only if LFP type is MIPI */
+ if (!dev_priv->vbt.has_mipi)
+ return;
+
+ /* Initialize this to undefined indicating no generic MIPI support */
+ dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
- mipi = find_section(bdb, BDB_MIPI_CONFIG);
- if (!mipi) {
- DRM_DEBUG_KMS("No MIPI BDB found");
+ /* Block #40 is already parsed and panel_fixed_mode is
+	 * stored in dev_priv->lfp_lvds_vbt_mode;
+	 * reuse this when needed
+ */
+
+	/* Parse block #52 using the panel index from the
+	 * already parsed panel_type
+ */
+ start = find_section(bdb, BDB_MIPI_CONFIG);
+ if (!start) {
+ DRM_DEBUG_KMS("No MIPI config BDB found");
return;
}
- /* XXX: add more info */
+ DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
+ panel_type);
+
+ /*
+ * get hold of the correct configuration block and pps data as per
+ * the panel_type as index
+ */
+ config = &start->config[panel_type];
+ pps = &start->pps[panel_type];
+
+	/* store the full data for now; trim later if not all of it is needed */
+ dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.config)
+ return;
+
+ dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.pps) {
+ kfree(dev_priv->vbt.dsi.config);
+ return;
+ }
+
+ /* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+
+ /* Check if we have sequence block as well */
+ sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+ if (!sequence) {
+ DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+
+ block_size = get_blocksize(sequence);
+
+ /*
+ * parse the sequence block for individual sequences
+ */
+ dev_priv->vbt.dsi.seq_version = sequence->version;
+
+ seq_data = &sequence->data[0];
+
+ /*
+	 * the sequence block is variable length, so walk it to
+	 * find the sequence data for the specific panel id
+ */
+ for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
+ panel_id = *seq_data;
+ seq_size = *((u16 *) (seq_data + 1));
+ if (panel_id == panel_type)
+ break;
+
+ /* skip the sequence including seq header of 3 bytes */
+ seq_data = seq_data + 3 + seq_size;
+ if ((seq_data - &sequence->data[0]) > block_size) {
+ DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
+ return;
+ }
+ }
+
+ if (i == MAX_MIPI_CONFIGURATIONS) {
+ DRM_ERROR("Sequence block detected but no valid configuration\n");
+ return;
+ }
+
+	/* check that the found sequence lies completely within the sequence block;
+ * just being paranoid */
+ if (seq_size > block_size) {
+ DRM_ERROR("Corrupted sequence/size, bailing out\n");
+ return;
+ }
+
+	/* skip the panel id (1 byte) and seq size (2 bytes) */
+ dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.data)
+ return;
+
+ /*
+	 * loop over the sequence data and split it into individual sequences;
+	 * there are only 5 sequence types as of now
+ */
+ data = dev_priv->vbt.dsi.data;
+ dev_priv->vbt.dsi.size = seq_size;
+
+ /* two consecutive 0x00 indicate end of all sequences */
+ while (1) {
+ int seq_id = *data;
+ if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
+ dev_priv->vbt.dsi.sequence[seq_id] = data;
+ DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
+ } else {
+ DRM_ERROR("undefined sequence\n");
+ goto err;
+ }
+
+ /* partial parsing to skip elements */
+ data = goto_next_sequence(data, &seq_size);
+
+ if (data == NULL) {
+ DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+ goto err;
+ }
+
+ if (*data == 0)
+ break; /* end of sequence reached */
+ }
+
+ DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+ return;
+err:
+ kfree(dev_priv->vbt.dsi.data);
+ dev_priv->vbt.dsi.data = NULL;
+
+	/* error during parsing, so clear all sequence pointers
+	 * left over from the partial parse */
+ memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -823,6 +1060,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* skip the device block if device type is invalid */
continue;
}
+
+ if (p_child->common.dvo_port >= DVO_PORT_MIPIA
+ && p_child->common.dvo_port <= DVO_PORT_MIPID
+ &&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
+ DRM_DEBUG_KMS("Found MIPI as LFP\n");
+ dev_priv->vbt.has_mipi = 1;
+ dev_priv->vbt.dsi.port = p_child->common.dvo_port;
+ }
+
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
@@ -839,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+ /* Default to having backlight */
+ dev_priv->vbt.backlight.present = true;
+
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
@@ -893,6 +1142,46 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
+static struct bdb_header *validate_vbt(char *base, size_t size,
+ struct vbt_header *vbt,
+ const char *source)
+{
+ size_t offset;
+ struct bdb_header *bdb;
+
+ if (vbt == NULL) {
+ DRM_DEBUG_DRIVER("VBT signature missing\n");
+ return NULL;
+ }
+
+ offset = (char *)vbt - base;
+ if (offset + sizeof(struct vbt_header) > size) {
+ DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ return NULL;
+ }
+
+ if (memcmp(vbt->signature, "$VBT", 4)) {
+ DRM_DEBUG_DRIVER("VBT invalid signature\n");
+ return NULL;
+ }
+
+ offset += vbt->bdb_offset;
+ if (offset + sizeof(struct bdb_header) > size) {
+ DRM_DEBUG_DRIVER("BDB header incomplete\n");
+ return NULL;
+ }
+
+ bdb = (struct bdb_header *)(base + offset);
+ if (offset + bdb->bdb_size > size) {
+ DRM_DEBUG_DRIVER("BDB incomplete\n");
+ return NULL;
+ }
+
+ DRM_DEBUG_KMS("Using VBT from %s: %20s\n",
+ source, vbt->signature);
+ return bdb;
+}
+
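
validate_vbt() above chains four checks: the VBT header fits in the mapped region, it carries the "$VBT" signature, the BDB header it points at also fits, and the BDB's declared size stays inside the region. A standalone sketch with the structs trimmed to the fields the checks touch (the field layout here is illustrative, not the exact VBT ABI):

#include <string.h>
#include <stddef.h>
#include <stdint.h>

struct vbt_hdr {
    char signature[20];
    uint16_t version;
    uint16_t header_size;
    uint16_t vbt_size;
    uint8_t vbt_checksum;
    uint8_t reserved0;
    uint32_t bdb_offset;
};

struct bdb_hdr {
    char signature[16];
    uint16_t version;
    uint16_t header_size;
    uint16_t bdb_size;
};

static const struct bdb_hdr *validate(const char *base, size_t size,
                                      const struct vbt_hdr *vbt)
{
    size_t offset = (const char *)vbt - base;

    if (offset + sizeof(*vbt) > size)
        return NULL;                    /* VBT header incomplete */
    if (memcmp(vbt->signature, "$VBT", 4))
        return NULL;                    /* wrong signature */

    offset += vbt->bdb_offset;
    if (offset + sizeof(struct bdb_hdr) > size)
        return NULL;                    /* BDB header incomplete */

    const struct bdb_hdr *bdb = (const struct bdb_hdr *)(base + offset);
    if (offset + bdb->bdb_size > size)
        return NULL;                    /* BDB body overruns the region */
    return bdb;
}
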
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
@@ -916,20 +1205,13 @@ intel_parse_bios(struct drm_device *dev)
init_vbt_defaults(dev_priv);
/* XXX Should this validation be moved to intel_opregion.c? */
- if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
- struct vbt_header *vbt = dev_priv->opregion.vbt;
- if (memcmp(vbt->signature, "$VBT", 4) == 0) {
- DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
- vbt->signature);
- bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
- } else
- dev_priv->opregion.vbt = NULL;
- }
+ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
+ bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
+ (struct vbt_header *)dev_priv->opregion.vbt,
+ "OpRegion");
if (bdb == NULL) {
- struct vbt_header *vbt = NULL;
- size_t size;
- int i;
+ size_t i, size;
bios = pci_map_rom(pdev, &size);
if (!bios)
@@ -937,19 +1219,18 @@ intel_parse_bios(struct drm_device *dev)
/* Scour memory looking for the VBT signature */
for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
+ if (memcmp(bios + i, "$VBT", 4) == 0) {
+ bdb = validate_vbt(bios, size,
+ (struct vbt_header *)(bios + i),
+ "PCI ROM");
break;
}
}
- if (!vbt) {
- DRM_DEBUG_DRIVER("VBT signature missing\n");
+ if (!bdb) {
pci_unmap_rom(pdev, bios);
return -1;
}
-
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
}
/* Grab useful general definitions */
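
The panel-id scan in parse_mipi() above walks entries consisting of a 1-byte panel id, a 2-byte little-endian size, and a payload until the id matches panel_type, bailing out if the walk leaves the block. A sketch of the same scan with slightly tightened bounds checks (function and parameter names are mine):

#include <stdint.h>
#include <stddef.h>

static const uint8_t *find_panel_sequence(const uint8_t *data,
                                          size_t block_size, int panel_type,
                                          int max_entries, uint16_t *seq_size)
{
    size_t off = 0;
    int i;

    for (i = 0; i < max_entries; i++) {
        uint16_t size;

        if (off + 3 > block_size)       /* header would leave the block */
            return NULL;
        size = data[off + 1] | (data[off + 2] << 8);

        if (data[off] == panel_type) {
            if (off + 3 + size > block_size)
                return NULL;            /* corrupted size field */
            *seq_size = size;
            return data + off + 3;      /* skip the 3-byte entry header */
        }
        off += 3 + size;                /* next entry */
    }
    return NULL;                        /* no entry for this panel */
}
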
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f27f7b282465..b98667796337 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -282,6 +282,9 @@ struct bdb_general_definitions {
union child_device_config devices[0];
} __packed;
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
+
struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
@@ -294,6 +297,18 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
} __packed;
/* LFP pointer table contains entries to the struct below */
@@ -482,6 +497,20 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
+ /* Driver features data block */
+ u16 rmpm_enabled:1;
+ u16 s2ddt_enabled:1;
+ u16 dpst_enabled:1;
+ u16 bltclt_enabled:1;
+ u16 adb_enabled:1;
+ u16 drrs_enabled:1;
+ u16 grs_enabled:1;
+ u16 gpmt_enabled:1;
+ u16 tbt_enabled:1;
+ u16 psr_enabled:1;
+ u16 ips_enabled:1;
+ u16 reserved3:4;
+ u16 pc_feature_valid:1;
} __packed;
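
These VBT flags are read through C bitfields, which relies on the LSB-first, little-endian allocation used on the targets the driver cares about; under that assumption the declaration order gives rmpm bit 0, s2ddt bit 1, and so on, with drrs at bit 5 and psr at bit 9. An equivalent explicit-mask sketch (the bit positions follow from that layout assumption):

#include <stdint.h>
#include <stdio.h>

#define FEAT_DRRS_ENABLED  (1u << 5)
#define FEAT_PSR_ENABLED   (1u << 9)

int main(void)
{
    uint16_t features = 0x0220;    /* drrs_enabled and psr_enabled set */

    printf("drrs=%d psr=%d\n",
           !!(features & FEAT_DRRS_ENABLED),
           !!(features & FEAT_PSR_ENABLED));
    return 0;
}
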
#define EDP_18BPP 0
@@ -714,6 +743,10 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
+#define DVO_PORT_MIPIA 21
+#define DVO_PORT_MIPIB 22
+#define DVO_PORT_MIPIC 23
+#define DVO_PORT_MIPID 24
/* Block 52 contains MIPI Panel info
* 6 such enteries will there. Index into correct
@@ -870,4 +903,35 @@ struct bdb_mipi_sequence {
u8 data[0];
};
+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+ MIPI_SEQ_UNDEFINED = 0,
+ MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_UNDEFINED = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_STATUS,
+ MIPI_SEQ_ELEM_MAX
+};
+
+enum mipi_gpio_pin_index {
+ MIPI_GPIO_UNDEFINED = 0,
+ MIPI_GPIO_PANEL_ENABLE,
+ MIPI_GPIO_BL_ENABLE,
+ MIPI_GPIO_PWM_ENABLE,
+ MIPI_GPIO_RESET_N,
+ MIPI_GPIO_PWR_DOWN_R,
+ MIPI_GPIO_STDBY_RST_N,
+ MIPI_GPIO_MAX
+};
+
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index aa5a3dc43342..5a045d3bd77e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -144,28 +144,49 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 temp;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ u32 adpa;
+
+ if (INTEL_INFO(dev)->gen >= 5)
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
- temp = I915_READ(crt->adpa_reg);
- temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- temp &= ~ADPA_DAC_ENABLE;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ /* For CPT allow 3 pipe config, for others just use A or B */
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+ else if (crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
case DRM_MODE_DPMS_ON:
- temp |= ADPA_DAC_ENABLE;
+ adpa |= ADPA_DAC_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
- temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_SUSPEND:
- temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_OFF:
- temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
}
- I915_WRITE(crt->adpa_reg, temp);
+ I915_WRITE(crt->adpa_reg, adpa);
}
static void intel_disable_crt(struct intel_encoder *encoder)
@@ -274,42 +295,6 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_crt_mode_set(struct intel_encoder *encoder)
-{
-
- struct drm_device *dev = encoder->base.dev;
- struct intel_crt *crt = intel_encoder_to_crt(encoder);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
- u32 adpa;
-
- if (INTEL_INFO(dev)->gen >= 5)
- adpa = ADPA_HOTPLUG_BITS;
- else
- adpa = 0;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- adpa |= ADPA_HSYNC_ACTIVE_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- adpa |= ADPA_VSYNC_ACTIVE_HIGH;
-
- /* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev))
- ; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev))
- adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
- else if (crtc->pipe == 0)
- adpa |= ADPA_PIPE_A_SELECT;
- else
- adpa |= ADPA_PIPE_B_SELECT;
-
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
-
- I915_WRITE(crt->adpa_reg, adpa);
-}
-
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -645,11 +630,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
+ struct drm_modeset_acquire_ctx ctx;
intel_runtime_pm_get(dev_priv);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
force);
power_domain = intel_display_port_power_domain(intel_encoder);
@@ -688,12 +674,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
}
/* for pre-945g platforms use load detect */
- if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
status = connector_status_unknown;
@@ -867,7 +853,6 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
- crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0ad4e9600063..b17b9c7c769f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -364,55 +364,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
-static void intel_ddi_mode_set(struct intel_encoder *encoder)
-{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int port = intel_ddi_get_encoder_port(encoder);
- int pipe = crtc->pipe;
- int type = encoder->type;
- struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
-
- DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
-
- crtc->eld_vld = false;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
-
- intel_dp->DP = intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
-
- if (intel_dp->has_audio) {
- DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("DP audio: write eld information\n");
- intel_write_eld(&encoder->base, adjusted_mode);
- }
- } else if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic
- * and a new set of registers, so we leave it for future
- * patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(&encoder->base, adjusted_mode);
- }
-
- intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
- }
-}
-
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
@@ -1062,9 +1013,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
- if (intel_hdmi->has_hdmi_sink)
+ if (intel_crtc->config.has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
@@ -1293,28 +1242,48 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
+ pipe_name(crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
+ intel_write_eld(encoder, &crtc->config.adjusted_mode);
+ }
+
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
- WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+ WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A)
intel_dp_stop_link_train(intel_dp);
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_hdmi->set_infoframes(encoder,
+ crtc->config.has_hdmi_sink,
+ &crtc->config.adjusted_mode);
}
}
@@ -1385,7 +1354,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_edp_psr_enable(intel_dp);
}
- if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+ if (intel_crtc->config.has_audio) {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@@ -1403,11 +1373,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+	/* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
+ * register is part of the power well on Haswell. */
+ if (intel_crtc->config.has_audio) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
(pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
if (type == INTEL_OUTPUT_EDP) {
@@ -1580,6 +1553,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
+ pipe_config->has_hdmi_sink = true;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
@@ -1592,6 +1566,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
+ if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+ pipe_config->has_audio = true;
+ }
+
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
@@ -1708,7 +1688,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_ddi_compute_config;
- intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5b60e25baa32..e27e7804c0b9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,6 +41,9 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
+#define DIV_ROUND_CLOSEST_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+
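
The macro above implements round-to-nearest unsigned division by adding half the divisor before truncating; do_div() is used because a plain 64-bit divide is not available on all 32-bit kernel targets. A plain C equivalent:

#include <stdint.h>
#include <assert.h>

/* x / d, rounded to the nearest integer instead of truncated */
static uint64_t div_round_closest_ull(uint64_t ll, uint32_t d)
{
    return (ll + d / 2) / d;
}

int main(void)
{
    assert(div_round_closest_ull(7, 2) == 4);   /* 3.5 rounds up */
    assert(div_round_closest_ull(9, 4) == 2);   /* 2.25 rounds down */
    return 0;
}
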
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -55,6 +58,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
+static void intel_dp_set_m_n(struct intel_crtc *crtc);
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
+static void ironlake_set_pipeconf(struct drm_crtc *crtc);
+static void haswell_set_pipeconf(struct drm_crtc *crtc);
+static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc);
typedef struct {
int min, max;
@@ -328,6 +340,22 @@ static const intel_limit_t intel_limits_vlv = {
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
+static const intel_limit_t intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .vco = { .min = 4860000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
@@ -412,6 +440,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (IS_CHERRYVIEW(dev)) {
+ limit = &intel_limits_chv;
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
@@ -456,6 +486,17 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
+static void chv_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
+ clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
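
chv_clock() differs from the other clock helpers because m2 is a 10.22 fixed-point value, so the extra `clock->n << 22` divisor cancels the 22 fractional bits carried through the multiply. A worked user-space example with made-up but in-range divisors:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest(uint64_t x, uint64_t d)
{
    return (x + d / 2) / d;
}

int main(void)
{
    uint32_t refclk = 100000;              /* kHz */
    uint32_t n = 1, m1 = 2, p1 = 2, p2 = 2;
    uint64_t m2 = (uint64_t)27 << 22;      /* 27.0 in 10.22 fixed point */

    uint64_t m = m1 * m2;
    uint64_t vco = div_round_closest((uint64_t)refclk * m,
                                     (uint64_t)n << 22);
    uint64_t dot = div_round_closest(vco, p1 * p2);

    /* vco = 100000 * 54 = 5400000 kHz (5.4 GHz), dot = 1350000 kHz */
    printf("vco=%llu kHz, dot=%llu kHz\n",
           (unsigned long long)vco, (unsigned long long)dot);
    return 0;
}
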
+
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
@@ -731,6 +772,58 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
return found;
}
+static bool
+chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ uint64_t m2;
+ int found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /*
+	 * Per the hardware doc, n is always set to 1 and m1 always
+	 * to 2. If we need to support a 200 MHz refclk, this must be
+	 * revisited because n may no longer be 1.
+ */
+ clock.n = 1, clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+ clock.n) << 22, refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_clock(refclk, &clock);
+
+ if (!intel_PLL_is_valid(dev, limit, &clock))
+ continue;
+
+			/* based on hardware requirement, prefer a bigger p */
+ if (clock.p > best_clock->p) {
+ *best_clock = clock;
+ found = true;
+ }
+ }
+ }
+
+ return found;
+}
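
The search above inverts that relation: for each p1/p2 candidate it back-solves the fixed-point m2 that hits the target fast clock, and the INT_MAX/m1 guard drops candidates whose m2 would overflow the later multiply. A sketch of one iteration (the divisor values are illustrative):

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
    uint32_t refclk = 100000;             /* kHz */
    uint32_t target = 270000 * 5;         /* target in fast clocks, kHz */
    uint32_t n = 1, m1 = 2, p1 = 2, p2 = 2;
    uint32_t p = p1 * p2;

    /* m2 back-solved in 10.22 fixed point, rounding to closest */
    uint64_t num = ((uint64_t)target * p * n) << 22;
    uint64_t den = (uint64_t)refclk * m1;
    uint64_t m2 = (num + den / 2) / den;

    if (m2 > (uint64_t)INT_MAX / m1) {    /* same overflow guard */
        puts("candidate skipped");
        return 0;
    }

    /* re-deriving vco confirms these divisors hit the target exactly */
    uint64_t vco = ((uint64_t)refclk * m1 * m2) / ((uint64_t)n << 22);
    printf("m2=%.2f, vco=%llu kHz, dot=%llu kHz\n",
           m2 / (double)(1 << 22),
           (unsigned long long)vco, (unsigned long long)(vco / p));
    return 0;
}
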
+
bool intel_crtc_active(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -765,7 +858,7 @@ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
frame = I915_READ(frame_reg);
if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
- DRM_DEBUG_KMS("vblank wait timed out\n");
+ WARN(1, "vblank wait timed out\n");
}
/**
@@ -878,7 +971,7 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit;
if (HAS_PCH_IBX(dev_priv->dev)) {
- switch(port->port) {
+ switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
@@ -892,7 +985,7 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
return true;
}
} else {
- switch(port->port) {
+ switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
@@ -1097,10 +1190,8 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
- cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
else
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+ cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -1253,6 +1344,9 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
+ return false;
} else {
if ((val & DP_PIPE_MASK) != (pipe << 30))
return false;
@@ -1269,6 +1363,9 @@ static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
if (HAS_PCH_CPT(dev_priv->dev)) {
if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
return false;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
+ return false;
} else {
if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
return false;
@@ -1367,7 +1464,17 @@ static void intel_init_dpio(struct drm_device *dev)
if (!IS_VALLEYVIEW(dev))
return;
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+ /*
+ * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+ * CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+ } else {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+ }
}
static void intel_reset_dpio(struct drm_device *dev)
@@ -1377,25 +1484,48 @@ static void intel_reset_dpio(struct drm_device *dev)
if (!IS_VALLEYVIEW(dev))
return;
- /*
- * Enable the CRI clock source so we can get at the display and the
- * reference clock for VGA hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_INTEGRATED_CRI_CLK_VLV);
+ if (IS_CHERRYVIEW(dev)) {
+ enum dpio_phy phy;
+ u32 val;
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all be set
- * to 0.
- *
- * This should only be done on init and resume from S3 with both
- * PLLs disabled, or we risk losing DPIO and PLL synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
+ PHY_POWERGOOD(phy), 1))
+ DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+ /*
+ * Deassert common lane reset for PHY.
+ *
+ * This should only be done on init and resume from S3
+ * with both PLLs disabled, or we risk losing DPIO and
+ * PLL synchronization.
+ */
+ val = I915_READ(DISPLAY_PHY_CONTROL);
+ I915_WRITE(DISPLAY_PHY_CONTROL,
+ PHY_COM_LANE_RESET_DEASSERT(phy, val));
+ }
+
+ } else {
+ /*
+ * If DPIO has already been reset, e.g. by BIOS, just skip all
+ * this.
+ */
+ if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
+ false);
+ __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
+ true);
+ }
}
static void vlv_enable_pll(struct intel_crtc *crtc)
@@ -1436,6 +1566,44 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
udelay(150); /* wait for warmup */
}
+static void chv_enable_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+	/* Re-enable the 10bit clock to the display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("PLL %d failed to lock\n", pipe);
+
+ /* not sure when this should be written */
+ I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -1519,45 +1687,92 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
+
+}
+
+static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* Set PLL en = 0 */
+ val = DPLL_SSC_REF_CLOCK_CHV;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ I915_WRITE(DPLL(pipe), val);
+ POSTING_READ(DPLL(pipe));
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Disable 10bit clock to display controller */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val &= ~DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport)
{
u32 port_mask;
+ int dpll_reg;
switch (dport->port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
+ dpll_reg = DPLL(0);
break;
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
+ dpll_reg = DPLL(0);
+ break;
+ case PORT_D:
+ port_mask = DPLL_PORTD_READY_MASK;
+ dpll_reg = DPIO_PHY_STATUS;
break;
default:
BUG();
}
- if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
+ if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
- port_name(dport->port), I915_READ(DPLL(0)));
+ port_name(dport->port), I915_READ(dpll_reg));
+}
+
+static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ WARN_ON(!pll->refcount);
+ if (pll->active == 0) {
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ WARN_ON(pll->on);
+ assert_shared_dpll_disabled(dev_priv, pll);
+
+ pll->mode_set(dev_priv, pll);
+ }
}
/**
- * ironlake_enable_shared_dpll - enable PCH PLL
+ * intel_enable_shared_dpll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
+static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
- /* PCH PLLs only available on ILK, SNB and IVB */
- BUG_ON(INTEL_INFO(dev)->gen < 5);
if (WARN_ON(pll == NULL))
return;
@@ -1804,16 +2019,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
I915_WRITE(reg, val | PIPECONF_ENABLE);
POSTING_READ(reg);
-
- /*
- * There's no guarantee the pipe will really start running now. It
- * depends on the Gen, the output type and the relative order between
- * pipe and plane enabling. Avoid waiting on HSW+ since it's not
- * necessary.
- * TODO: audit the previous gens.
- */
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
- intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
@@ -1882,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
+ struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
@@ -1890,18 +2096,25 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
- WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+ if (intel_crtc->primary_enabled)
+ return;
intel_crtc->primary_enabled = true;
reg = DSPCNTR(plane);
val = I915_READ(reg);
- if (val & DISPLAY_PLANE_ENABLE)
- return;
+ WARN_ON(val & DISPLAY_PLANE_ENABLE);
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
- intel_wait_for_vblank(dev_priv->dev, pipe);
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
/**
@@ -1920,18 +2133,17 @@ static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+ if (!intel_crtc->primary_enabled)
+ return;
intel_crtc->primary_enabled = false;
reg = DSPCNTR(plane);
val = I915_READ(reg);
- if ((val & DISPLAY_PLANE_ENABLE) == 0)
- return;
+ WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
- intel_wait_for_vblank(dev_priv->dev, pipe);
}
static bool need_vtd_wa(struct drm_device *dev)
@@ -1954,7 +2166,7 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine_cs *pipelined)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
@@ -2134,7 +2346,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, c) {
i = to_intel_crtc(c);
if (c == &intel_crtc->base)
@@ -2152,9 +2364,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
}
}
-static int i9xx_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void i9xx_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2166,15 +2378,6 @@ static int i9xx_update_primary_plane(struct drm_crtc *crtc,
u32 dspcntr;
u32 reg;
- switch (plane) {
- case 0:
- case 1:
- break;
- default:
- DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
- return -EINVAL;
- }
-
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
@@ -2249,13 +2452,11 @@ static int i9xx_update_primary_plane(struct drm_crtc *crtc,
} else
I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
-
- return 0;
}
-static int ironlake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void ironlake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2267,16 +2468,6 @@ static int ironlake_update_primary_plane(struct drm_crtc *crtc,
u32 dspcntr;
u32 reg;
- switch (plane) {
- case 0:
- case 1:
- case 2:
- break;
- default:
- DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
- return -EINVAL;
- }
-
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
@@ -2343,8 +2534,6 @@ static int ironlake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(DSPLINOFF(plane), linear_offset);
}
POSTING_READ(reg);
-
- return 0;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -2359,7 +2548,9 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return dev_priv->display.update_primary_plane(crtc, fb, x, y);
+ dev_priv->display.update_primary_plane(crtc, fb, x, y);
+
+ return 0;
}
void intel_display_handle_reset(struct drm_device *dev)
@@ -2381,7 +2572,7 @@ void intel_display_handle_reset(struct drm_device *dev)
* pending_flip_queue really got woken up.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
@@ -2389,10 +2580,10 @@ void intel_display_handle_reset(struct drm_device *dev)
intel_finish_page_flip_plane(dev, plane);
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
/*
* FIXME: Once we have proper support for primary planes (and
* disabling them without disabling the entire crtc) allow again
@@ -2403,7 +2594,7 @@ void intel_display_handle_reset(struct drm_device *dev)
crtc->primary->fb,
crtc->x,
crtc->y);
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
}
}
@@ -2518,14 +2709,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
- ret = dev_priv->display.update_primary_plane(crtc, fb, x, y);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("failed to update base address\n");
- return ret;
- }
+ dev_priv->display.update_primary_plane(crtc, fb, x, y);
old_fb = crtc->primary->fb;
crtc->primary->fb = fb;
@@ -2628,12 +2812,10 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
u32 reg, temp, tries;
- /* FDI needs bits from pipe & plane first */
+ /* FDI needs bits from pipe first */
assert_pipe_enabled(dev_priv, pipe);
- assert_plane_enabled(dev_priv, plane);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -3064,9 +3246,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- }
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
@@ -3104,7 +3285,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
	 * cannot claim and pin a new fb without at least acquiring the
* struct_mutex and so serialising with us.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
@@ -3117,7 +3298,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
return false;
}
-static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3127,8 +3308,9 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
- wait_event(dev_priv->pending_flip_queue,
- !intel_crtc_has_pending_flip(crtc));
+ WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
+ !intel_crtc_has_pending_flip(crtc),
+ 60*HZ) == 0);
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->primary->fb);
@@ -3341,7 +3523,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
- ironlake_enable_shared_dpll(intel_crtc);
+ intel_enable_shared_dpll(intel_crtc);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
@@ -3445,6 +3627,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
+ WARN_ON(pll->refcount);
+
goto found;
}
@@ -3478,20 +3662,13 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
return NULL;
found:
+ if (pll->refcount == 0)
+ pll->hw_state = crtc->config.dpll_hw_state;
+
crtc->config.shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- if (pll->active == 0) {
- memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
- sizeof(pll->hw_state));
-
- DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
- WARN_ON(pll->on);
- assert_shared_dpll_disabled(dev_priv, pll);
-
- pll->mode_set(dev_priv, pll);
- }
pll->refcount++;
return pll;
@@ -3562,17 +3739,17 @@ static void intel_disable_planes(struct drm_crtc *crtc)
void hsw_enable_ips(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!crtc->config.ips_enabled)
return;
- /* We can only enable IPS after we enable a plane and wait for a vblank.
- * We guarantee that the plane is enabled by calling intel_enable_ips
- * only after intel_enable_plane. And intel_enable_plane already waits
- * for a vblank, so all we need to do here is to enable the IPS bit. */
+ /* We can only enable IPS after we enable a plane and wait for a vblank */
+ intel_wait_for_vblank(dev, crtc->pipe);
+
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(crtc->base.dev)) {
+ if (IS_BROADWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3602,10 +3779,13 @@ void hsw_disable_ips(struct intel_crtc *crtc)
return;
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(crtc->base.dev)) {
+ if (IS_BROADWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
+ /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+ if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+ DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
POSTING_READ(IPS_CTL);
@@ -3662,6 +3842,94 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
hsw_enable_ips(intel_crtc);
}
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ if (!enable && intel_crtc->overlay) {
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
+}
+
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 cntl = I915_READ(CURCNTR(pipe));
+
+ if ((cntl & CURSOR_MODE) == 0) {
+ u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+ I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+ I915_WRITE(CURCNTR(pipe), cntl);
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+ }
+}
+
+static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ /* The fixup needs to happen before cursor is enabled */
+ if (IS_G4X(dev))
+ g4x_fixup_plane(dev_priv, pipe);
+ intel_crtc_update_cursor(crtc, true);
+ intel_crtc_dpms_overlay(intel_crtc, true);
+
+ hsw_enable_ips(intel_crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_crtc_vblank_off(crtc);
+
+ if (dev_priv->fbc.plane == plane)
+ intel_disable_fbc(dev);
+
+ hsw_disable_ips(intel_crtc);
+
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3669,13 +3937,35 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
+ enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ if (intel_crtc->config.has_pch_encoder)
+ intel_prepare_shared_dpll(intel_crtc);
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ ironlake_set_pipeconf(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -3705,32 +3995,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
if (intel_crtc->config.has_pch_encoder)
ironlake_pch_enable(crtc);
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
- /*
- * There seems to be a race in PCH platform hw (at least on some
- * outputs) where an enabled pipe still completes any pageflip right
- * away (as if the pipe is off) instead of waiting for vblank. As soon
- * as the first vblank happend, everything works as expected. Hence just
- * wait for one vblank before returning to avoid strange things
- * happening.
- */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -3739,47 +4016,6 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
-static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
-
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
-
- hsw_enable_ips(intel_crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
-
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
-
- /* FBC must be disabled before disabling the plane on HSW. */
- if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
-
- hsw_disable_ips(intel_crtc);
-
- intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
- intel_disable_primary_hw_plane(dev_priv, plane, pipe);
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -3793,7 +4029,7 @@ static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
/* We want to get the other_active_crtc only if there's only 1 other
* active crtc. */
- list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc_it) {
if (!crtc_it->active || crtc_it == crtc)
continue;
@@ -3816,12 +4052,34 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
+ enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ haswell_set_pipeconf(crtc);
+
+ intel_set_pipe_csc(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -3862,7 +4120,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
- haswell_crtc_enable_planes(crtc);
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3887,26 +4147,16 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
u32 reg, temp;
-
if (!intel_crtc->active)
return;
+ intel_crtc_disable_planes(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
-
- if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
-
- intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
- intel_disable_primary_hw_plane(dev_priv, plane, pipe);
-
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3950,6 +4200,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3965,7 +4216,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- haswell_crtc_disable_planes(crtc);
+ intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
@@ -3997,6 +4248,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4011,48 +4263,6 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
intel_ddi_put_crtc_pll(crtc);
}
-static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
-{
- if (!enable && intel_crtc->overlay) {
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- dev_priv->mm.interruptible = false;
- (void) intel_overlay_switch_off(intel_crtc->overlay);
- dev_priv->mm.interruptible = true;
- mutex_unlock(&dev->struct_mutex);
- }
-
- /* Let userspace switch the overlay on again. In most cases userspace
- * has to recompute where to put it anyway.
- */
-}
-
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 cntl = I915_READ(CURCNTR(pipe));
-
- if ((cntl & CURSOR_MODE) == 0) {
- u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
- I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
- I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- I915_WRITE(CURCNTR(pipe), cntl);
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
- I915_WRITE(FW_BLC_SELF, fw_bcl_self);
- }
-}
-
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4164,7 +4374,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
if (!crtc->base.enabled)
@@ -4176,7 +4386,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_power_get(dev_priv, domain);
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
for_each_power_domain(domain, crtc->enabled_power_domains)
@@ -4207,6 +4417,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
+ WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
+ dev_priv->vlv_cdclk_freq = cdclk;
+
if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266)
@@ -4261,7 +4474,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
intel_i2c_reset(dev);
}
-static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
{
int cur_cdclk, vco;
int divider;
@@ -4282,10 +4495,6 @@ static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
- int cur_cdclk;
-
- cur_cdclk = valleyview_cur_cdclk(dev_priv);
-
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
@@ -4311,8 +4520,7 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
struct intel_crtc *intel_crtc;
int max_pixclk = 0;
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
intel_crtc->new_config->adjusted_mode.crtc_clock);
@@ -4327,14 +4535,13 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
- int cur_cdclk = valleyview_cur_cdclk(dev_priv);
- if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+ if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
+ dev_priv->vlv_cdclk_freq)
return;
/* disable/enable all currently active pipes while we change cdclk */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head)
+ for_each_intel_crtc(dev, intel_crtc)
if (intel_crtc->base.enabled)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
@@ -4343,10 +4550,9 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
- int cur_cdclk = valleyview_cur_cdclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
- if (req_cdclk != cur_cdclk)
+ if (req_cdclk != dev_priv->vlv_cdclk_freq)
valleyview_set_cdclk(dev, req_cdclk);
modeset_update_crtc_power_domains(dev);
}
@@ -4360,22 +4566,56 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
bool is_dsi;
+ u32 dspcntr;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+ if (!is_dsi && !IS_CHERRYVIEW(dev))
+ vlv_prepare_pll(intel_crtc);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
- if (!is_dsi)
- vlv_enable_pll(intel_crtc);
+ if (!is_dsi) {
+ if (IS_CHERRYVIEW(dev))
+ chv_enable_pll(intel_crtc);
+ else
+ vlv_enable_pll(intel_crtc);
+ }
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4387,15 +4627,25 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
-
- intel_update_fbc(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+
+ intel_crtc_enable_planes(crtc);
+
+ drm_crtc_vblank_on(crtc);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ i9xx_check_fifo_underruns(dev);
+}
+
+static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -4406,14 +4656,49 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ u32 dspcntr;
WARN_ON(!crtc->enabled);
if (intel_crtc->active)
return;
+ i9xx_set_pll_dividers(intel_crtc);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
+ crtc->x, crtc->y);
+
intel_crtc->active = true;
+ if (!IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@@ -4426,21 +4711,26 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
- intel_enable_planes(crtc);
- /* The fixup needs to happen before cursor is enabled */
- if (IS_G4X(dev))
- g4x_fixup_plane(dev_priv, pipe);
- intel_crtc_update_cursor(crtc, true);
-
- /* Give the overlay scaler a chance to enable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, true);
-
- intel_update_fbc(dev);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+
+ intel_crtc_enable_planes(crtc);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So don't enable underrun reporting before at least some planes
+ * are enabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
+ drm_crtc_vblank_on(crtc);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ i9xx_check_fifo_underruns(dev);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4465,27 +4755,31 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
if (!intel_crtc->active)
return;
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So disable underrun reporting before all the planes get disabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
- /* Give the overlay scaler a chance to disable if it's on this pipe */
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
+ intel_crtc_disable_planes(crtc);
- if (dev_priv->fbc.plane == plane)
- intel_disable_fbc(dev);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
- intel_crtc_dpms_overlay(intel_crtc, false);
- intel_crtc_update_cursor(crtc, false);
- intel_disable_planes(crtc);
- intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+ /*
+ * On gen2 planes are double buffered but the pipe isn't, so we must
+ * wait for planes to fully turn off before disabling the pipe.
+ */
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_disable_pipe(dev_priv, pipe);
i9xx_pfit_disable(intel_crtc);
@@ -4494,15 +4788,25 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
- vlv_disable_pll(dev_priv, pipe);
- else if (!IS_VALLEYVIEW(dev))
- i9xx_disable_pll(dev_priv, pipe);
+ if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+ if (IS_CHERRYVIEW(dev))
+ chv_disable_pll(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_disable_pll(dev_priv, pipe);
+ else
+ i9xx_disable_pll(dev_priv, pipe);
+ }
+
+ if (!IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_crtc->active = false;
intel_update_watermarks(crtc);
+ mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
+ mutex_unlock(&dev->struct_mutex);
}
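
Two gen2 quirks dictate the ordering in i9xx_crtc_disable() above: underrun reporting is disarmed before the planes go down (a gen2 pipe with all planes off reports a bogus underrun), and because gen2 planes are double buffered while the pipe is not, one vblank must pass between the plane disable and the pipe disable. A runnable outline of just that ordering (hypothetical helper names, hardware access replaced by prints):

    #include <stdbool.h>
    #include <stdio.h>

    static void step(const char *what) { printf("%s\n", what); }

    static void crtc_disable(bool is_gen2)
    {
        if (is_gen2)
            step("disarm underrun reporting"); /* must happen before planes go off */
        step("disable planes");
        if (is_gen2)
            step("wait one vblank");           /* double-buffered plane disable must land */
        step("disable pipe");
        if (!is_gen2)
            step("disarm underrun reporting"); /* non-gen2: safe after the pipe is off */
    }

    int main(void) { crtc_disable(true); return 0; }
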
static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -4565,13 +4869,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
dev_priv->display.crtc_disable(crtc);
- intel_crtc->eld_vld = false;
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
@@ -4635,7 +4937,7 @@ static void intel_connector_check_state(struct intel_connector *connector)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base));
+ connector->base.name);
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
@@ -5039,8 +5341,6 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
@@ -5053,17 +5353,14 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- I915_WRITE(FP0(pipe), fp);
crtc->config.dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- I915_WRITE(FP1(pipe), fp2);
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- I915_WRITE(FP1(pipe), fp);
crtc->config.dpll_hw_state.fp1 = fp;
}
}
@@ -5141,12 +5438,34 @@ static void intel_dp_set_m_n(struct intel_crtc *crtc)
static void vlv_update_pll(struct intel_crtc *crtc)
{
+ u32 dpll, dpll_md;
+
+ /*
+ * Enable DPIO clock input. We should never disable the reference
+ * clock for pipe B, since VGA hotplug / manual detection depends
+ * on it.
+ */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ /* We should never disable this, set it here for state tracking */
+ if (crtc->pipe == PIPE_B)
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_VCO_ENABLE;
+ crtc->config.dpll_hw_state.dpll = dpll;
+
+ dpll_md = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
+}
+
+static void vlv_prepare_pll(struct intel_crtc *crtc)
+{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- u32 dpll, mdiv;
+ u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val, dpll_md;
+ u32 coreclk, reg_val;
mutex_lock(&dev_priv->dpio_lock);
@@ -5159,7 +5478,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
/* See eDP HDMI DPIO driver vbios notes doc */
/* PLL B needs special handling */
- if (pipe)
+ if (pipe == PIPE_B)
vlv_pllb_recal_opamp(dev_priv, pipe);
/* Set up Tx target for periodic Rcomp update */
@@ -5203,7 +5522,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
- if (!pipe)
+ if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
else
@@ -5211,7 +5530,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
- if (!pipe)
+ if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
else
@@ -5227,26 +5546,84 @@ static void vlv_update_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_update_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ int dpll_reg = DPLL(crtc->pipe);
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 loopfilter, intcoeff;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ int refclk;
+
+ crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ DPLL_VCO_ENABLE;
+ if (pipe != PIPE_A)
+ crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ crtc->config.dpll_hw_state.dpll_md =
+ (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+
+ bestn = crtc->config.dpll.n;
+ bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
+ bestm1 = crtc->config.dpll.m1;
+ bestm2 = crtc->config.dpll.m2 >> 22;
+ bestp1 = crtc->config.dpll.p1;
+ bestp2 = crtc->config.dpll.p2;
/*
- * Enable DPIO clock input. We should never disable the reference
- * clock for pipe B, since VGA hotplug / manual detection depends
- * on it.
+ * Enable Refclk and SSC
*/
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- /* We should never disable this, set it here for state tracking */
- if (pipe == PIPE_B)
- dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ I915_WRITE(dpll_reg,
+ crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
- dpll_md = (crtc->config.pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* p1 and p2 divider */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ 5 << DPIO_CHV_S1_DIV_SHIFT |
+ bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+ bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+ 1 << DPIO_CHV_K_DIV_SHIFT);
+
+ /* Feedback post-divider - m2 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+ /* Feedback refclk divider - n and m1 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ DPIO_CHV_M1_DIV_BY_2 |
+ 1 << DPIO_CHV_N_DIV_SHIFT);
+
+ /* M2 fraction division */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+ /* M2 fraction division enable */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
+ DPIO_CHV_FRAC_DIV_EN |
+ (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+
+ /* Loop filter */
+ refclk = i9xx_get_refclk(&crtc->base, 0);
+ loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
+ 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
+ if (refclk == 100000)
+ intcoeff = 11;
+ else if (refclk == 38400)
+ intcoeff = 10;
+ else
+ intcoeff = 9;
+ loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
- if (crtc->config.has_dp_encoder)
- intel_dp_set_m_n(crtc);
+ /* AFC Recal */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ DPIO_AFC_RECAL);
mutex_unlock(&dev_priv->dpio_lock);
}
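
chv_update_pll() splits the fractional M2 divider across two DPIO words: the low 22 bits go to CHV_PLL_DW2 and the integer part (m2 >> 22) to CHV_PLL_DW0; chv_crtc_clock_get() further down reassembles them with ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff). A small stand-alone sketch of that round trip (user-space C, illustrative divider value only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t m2  = 0x0a19999a;           /* example m2; integer part fits in 8 bits */
        uint32_t dw2 = m2 & 0x3fffff;        /* 22-bit fraction  -> CHV_PLL_DW2 */
        uint32_t dw0 = m2 >> 22;             /* integer part     -> CHV_PLL_DW0 */

        /* readout path, as in chv_crtc_clock_get() */
        uint32_t rebuilt = ((dw0 & 0xff) << 22) | (dw2 & 0x3fffff);
        assert(rebuilt == m2);               /* lossless while (m2 >> 22) <= 0xff */
        return 0;
    }
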
@@ -5325,9 +5702,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
-
- if (crtc->config.has_dp_encoder)
- intel_dp_set_m_n(crtc);
}
static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -5567,16 +5941,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
- int ret;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
@@ -5592,7 +5962,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_dsi)
- goto skip_dpll;
+ return 0;
if (!intel_crtc->config.clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
@@ -5637,43 +6007,17 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
+ } else if (IS_CHERRYVIEW(dev)) {
+ chv_update_pll(intel_crtc);
} else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
} else {
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
- }
-
-skip_dpll:
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
- if (!IS_VALLEYVIEW(dev)) {
- if (pipe == 0)
- dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
+ num_connectors);
}
- intel_set_pipe_timings(intel_crtc);
-
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
-
- i9xx_set_pipeconf(intel_crtc);
-
- I915_WRITE(DSPCNTR(plane), dspcntr);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- return ret;
+ return 0;
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
@@ -5793,6 +6137,36 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
}
+static void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = pipe_config->cpu_transcoder;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ intel_clock_t clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ int refclk = 100000;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ chv_clock(refclk, &clock);
+
+ /* clock.dot is the fast clock */
+ pipe_config->port_clock = clock.dot / 5;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -5827,6 +6201,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
+ if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ pipe_config->limited_color_range = true;
+
if (INTEL_INFO(dev)->gen < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -5862,7 +6239,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ chv_crtc_clock_get(crtc, pipe_config);
+ else if (IS_VALLEYVIEW(dev))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
@@ -5983,8 +6362,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- }
- else
+ } else
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
@@ -6563,10 +6941,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
@@ -6574,7 +6949,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
bool is_lvds = false;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
- int ret;
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
@@ -6624,36 +6998,18 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(pipe));
+ pipe_name(intel_crtc->pipe));
return -EINVAL;
}
} else
intel_put_shared_dpll(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
-
if (is_lvds && has_reduced_clock && i915.powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
- intel_set_pipe_timings(intel_crtc);
-
- if (intel_crtc->config.has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
- }
-
- ironlake_set_pipeconf(crtc);
-
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- return ret;
+ return 0;
}
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
@@ -6831,6 +7187,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
break;
}
+ if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ pipe_config->limited_color_range = true;
+
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
@@ -6880,10 +7239,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
- unsigned long irqflags;
- uint32_t val;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ for_each_intel_crtc(dev, crtc)
WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
@@ -6902,14 +7259,29 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
"Utility pin enabled\n");
WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- val = I915_READ(DEIMR);
- WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
- "Unexpected DEIMR bits enabled: 0x%x\n", val);
- val = I915_READ(SDEIMR);
- WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
- "Unexpected SDEIMR bits enabled: 0x%x\n", val);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ /*
+ * In theory we can still leave IRQs enabled, as long as only the HPD
+ * interrupts remain enabled. We used to check for that, but since it's
+ * gen-specific and since we only disable LCPLL after we fully disable
+ * the interrupts, the check below should be enough.
+ */
+ WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_HASWELL(dev)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
+ val))
+ DRM_ERROR("Failed to disable D_COMP\n");
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ } else {
+ I915_WRITE(D_COMP, val);
+ }
+ POSTING_READ(D_COMP);
}
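
hsw_write_dcomp() exists because the same logical write needs two transports: on Haswell D_COMP is only reachable through the pcode mailbox (which can fail), while on Broadwell it is a plain MMIO register; the two LCPLL paths below now share the helper instead of open-coding the mailbox sequence. The pattern in isolation (hypothetical stand-ins, user-space C):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static int mailbox_write(uint32_t val)   /* stand-in for sandybridge_pcode_write() */
    {
        printf("pcode <= 0x%08x\n", val);
        return 0;                            /* 0 on success, like the real mailbox */
    }

    static void mmio_write(uint32_t val)     /* stand-in for I915_WRITE(D_COMP, val) */
    {
        printf("mmio  <= 0x%08x\n", val);
    }

    static void write_d_comp(bool is_hsw, uint32_t val)
    {
        if (is_hsw) {
            if (mailbox_write(val))          /* the mailbox can fail: report it */
                fprintf(stderr, "Failed to write D_COMP\n");
        } else {
            mmio_write(val);                 /* BDW: a plain register write */
        }
    }

    int main(void)
    {
        write_d_comp(true, 0x1);
        write_d_comp(false, 0x1);
        return 0;
    }
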
/*
@@ -6949,11 +7321,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
val = I915_READ(D_COMP);
val |= D_COMP_COMP_DISABLE;
- mutex_lock(&dev_priv->rps.hw_lock);
- if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
- DRM_ERROR("Failed to disable D_COMP\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
- POSTING_READ(D_COMP);
+ hsw_write_dcomp(dev_priv, val);
ndelay(100);
if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
@@ -7008,11 +7376,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val = I915_READ(D_COMP);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
- mutex_lock(&dev_priv->rps.hw_lock);
- if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
- DRM_ERROR("Failed to enable D_COMP\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
- POSTING_READ(D_COMP);
+ hsw_write_dcomp(dev_priv, val);
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
@@ -7066,8 +7430,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
uint32_t val;
- WARN_ON(!HAS_PC8(dev));
-
DRM_DEBUG_KMS("Enabling package C8+\n");
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7077,7 +7439,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
}
lpt_disable_clkout_dp(dev);
- hsw_runtime_pm_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
}
@@ -7086,12 +7447,9 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
uint32_t val;
- WARN_ON(!HAS_PC8(dev));
-
DRM_DEBUG_KMS("Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
- hsw_runtime_pm_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7101,10 +7459,11 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
}
intel_prepare_ddi(dev);
- i915_gem_init_swizzling(dev);
- mutex_lock(&dev_priv->rps.hw_lock);
- gen6_update_ring_freq(dev);
- mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void snb_modeset_global_resources(struct drm_device *dev)
+{
+ modeset_update_crtc_power_domains(dev);
}
static void haswell_modeset_global_resources(struct drm_device *dev)
@@ -7116,39 +7475,15 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
- int ret;
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
intel_ddi_pll_enable(intel_crtc);
- if (intel_crtc->config.has_dp_encoder)
- intel_dp_set_m_n(intel_crtc);
-
intel_crtc->lowfreq_avail = false;
- intel_set_pipe_timings(intel_crtc);
-
- if (intel_crtc->config.has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
- }
-
- haswell_set_pipeconf(crtc);
-
- intel_set_pipe_csc(crtc);
-
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- return ret;
+ return 0;
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
@@ -7228,38 +7563,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
-static int intel_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
- int pipe = intel_crtc->pipe;
- int ret;
-
- drm_vblank_pre_modeset(dev, pipe);
-
- ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
-
- drm_vblank_post_modeset(dev, pipe);
-
- if (ret != 0)
- return ret;
-
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.base.id,
- drm_get_encoder_name(&encoder->base),
- mode->base.id, mode->name);
- encoder->mode_set(encoder);
- }
-
- return 0;
-}
-
static struct {
int clock;
u32 config;
@@ -7374,8 +7677,6 @@ static void haswell_write_eld(struct drm_connector *connector,
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
@@ -7387,17 +7688,14 @@ static void haswell_write_eld(struct drm_connector *connector,
int aud_config = HSW_AUD_CFG(pipe);
int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
-
- DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
-
/* Audio output enable */
DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
tmp = I915_READ(aud_cntrl_st2);
tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
+ POSTING_READ(aud_cntrl_st2);
- /* Wait for 1 vertical blank */
- intel_wait_for_vblank(dev, pipe);
+ assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
@@ -7417,7 +7715,6 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
- intel_crtc->eld_vld = true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -7564,9 +7861,9 @@ void intel_write_eld(struct drm_encoder *encoder,
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
- drm_get_connector_name(connector),
+ connector->name,
connector->encoder->base.id,
- drm_get_encoder_name(connector->encoder));
+ connector->encoder->name);
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
@@ -7579,29 +7876,33 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool visible = base != 0;
- u32 cntl;
-
- if (intel_crtc->cursor_visible == visible)
- return;
+ uint32_t cntl;
- cntl = I915_READ(_CURACNTR);
- if (visible) {
+ if (base != intel_crtc->cursor_base) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
+ if (intel_crtc->cursor_cntl) {
+ I915_WRITE(_CURACNTR, 0);
+ POSTING_READ(_CURACNTR);
+ intel_crtc->cursor_cntl = 0;
+ }
+
I915_WRITE(_CURABASE, base);
+ POSTING_READ(_CURABASE);
+ }
- cntl &= ~(CURSOR_FORMAT_MASK);
- /* XXX width must be 64, stride 256 => 0x00 << 28 */
- cntl |= CURSOR_ENABLE |
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl = 0;
+ if (base)
+ cntl = (CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB;
- } else
- cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- I915_WRITE(_CURACNTR, cntl);
-
- intel_crtc->cursor_visible = visible;
+ CURSOR_FORMAT_ARGB);
+ if (intel_crtc->cursor_cntl != cntl) {
+ I915_WRITE(_CURACNTR, cntl);
+ POSTING_READ(_CURACNTR);
+ intel_crtc->cursor_cntl = cntl;
+ }
}
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -7610,16 +7911,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- bool visible = base != 0;
+ uint32_t cntl;
- if (intel_crtc->cursor_visible != visible) {
- int16_t width = intel_crtc->cursor_width;
- uint32_t cntl = I915_READ(CURCNTR(pipe));
- if (base) {
- cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- cntl |= MCURSOR_GAMMA_ENABLE;
-
- switch (width) {
+ cntl = 0;
+ if (base) {
+ cntl = MCURSOR_GAMMA_ENABLE;
+ switch (intel_crtc->cursor_width) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -7632,18 +7929,16 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
default:
WARN_ON(1);
return;
- }
- cntl |= pipe << 28; /* Connect to correct pipe */
- } else {
- cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- cntl |= CURSOR_MODE_DISABLE;
}
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ }
+ if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
-
- intel_crtc->cursor_visible = visible;
+ POSTING_READ(CURCNTR(pipe));
+ intel_crtc->cursor_cntl = cntl;
}
+
/* and commit changes on next vblank */
- POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
}
@@ -7654,15 +7949,12 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- bool visible = base != 0;
-
- if (intel_crtc->cursor_visible != visible) {
- int16_t width = intel_crtc->cursor_width;
- uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
- if (base) {
- cntl &= ~CURSOR_MODE;
- cntl |= MCURSOR_GAMMA_ENABLE;
- switch (width) {
+ uint32_t cntl;
+
+ cntl = 0;
+ if (base) {
+ cntl = MCURSOR_GAMMA_ENABLE;
+ switch (intel_crtc->cursor_width) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -7675,23 +7967,20 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
default:
WARN_ON(1);
return;
- }
- } else {
- cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- cntl |= CURSOR_MODE_DISABLE;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- cntl |= CURSOR_PIPE_CSC_ENABLE;
- cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
- }
- I915_WRITE(CURCNTR_IVB(pipe), cntl);
+ }
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
- intel_crtc->cursor_visible = visible;
+ if (intel_crtc->cursor_cntl != cntl) {
+ I915_WRITE(CURCNTR(pipe), cntl);
+ POSTING_READ(CURCNTR(pipe));
+ intel_crtc->cursor_cntl = cntl;
}
+
/* and commit changes on next vblank */
- POSTING_READ(CURCNTR_IVB(pipe));
- I915_WRITE(CURBASE_IVB(pipe), base);
- POSTING_READ(CURBASE_IVB(pipe));
+ I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -7705,7 +7994,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
u32 base = 0, pos = 0;
- bool visible;
if (on)
base = intel_crtc->cursor_addr;
@@ -7734,20 +8022,18 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
pos |= y << CURSOR_Y_SHIFT;
- visible = base != 0;
- if (!visible && !intel_crtc->cursor_visible)
+ if (base == 0 && intel_crtc->cursor_base == 0)
return;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- I915_WRITE(CURPOS_IVB(pipe), pos);
+ I915_WRITE(CURPOS(pipe), pos);
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
ivb_update_cursor(crtc, base);
- } else {
- I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base);
- else
- i9xx_update_cursor(crtc, base);
- }
+ else if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+ intel_crtc->cursor_base = base;
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -8015,7 +8301,8 @@ mode_fits_in_fbdev(struct drm_device *dev,
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old)
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
@@ -8025,11 +8312,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
- int i = -1;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector),
- encoder->base.id, drm_get_encoder_name(encoder));
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
+
+ drm_modeset_acquire_init(ctx, 0);
+
+retry:
+ ret = drm_modeset_lock(&config->connection_mutex, ctx);
+ if (ret)
+ goto fail_unlock;
/*
* Algorithm gets a little messy:
@@ -8045,7 +8340,9 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
if (encoder->crtc) {
crtc = encoder->crtc;
- mutex_lock(&crtc->mutex);
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -8058,7 +8355,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
}
/* Find an unused one (if possible) */
- list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, possible_crtc) {
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
@@ -8073,10 +8370,12 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
- return false;
+ goto fail_unlock;
}
- mutex_lock(&crtc->mutex);
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
@@ -8126,12 +8425,21 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
intel_crtc->new_config = &intel_crtc->config;
else
intel_crtc->new_config = NULL;
- mutex_unlock(&crtc->mutex);
+fail_unlock:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+
return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old)
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
@@ -8140,8 +8448,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector),
- encoder->base.id, drm_get_encoder_name(encoder));
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
if (old->load_detect_temp) {
to_intel_connector(connector)->new_encoder = NULL;
@@ -8155,7 +8463,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
drm_framebuffer_unreference(old->release_fb);
}
- mutex_unlock(&crtc->mutex);
+ goto unlock;
return;
}
@@ -8163,7 +8471,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
- mutex_unlock(&crtc->mutex);
+unlock:
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -8449,7 +8759,7 @@ void intel_mark_idle(struct drm_device *dev)
if (!i915.powersave)
goto out;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -8464,7 +8774,7 @@ out:
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
@@ -8472,7 +8782,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
if (!i915.powersave)
return;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -8560,7 +8870,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
if (work->event)
drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
- drm_vblank_put(dev, intel_crtc->pipe);
+ drm_crtc_vblank_put(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -8587,6 +8897,48 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
do_intel_finish_page_flip(dev, crtc);
}
+/* Is 'a' after or equal to 'b'? */
+static bool g4x_flip_count_after_eq(u32 a, u32 b)
+{
+ return !((a - b) & 0x80000000);
+}
+
+static bool page_flip_finished(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+	 * The relevant registers don't exist on pre-ctg.
+ * As the flip done interrupt doesn't trigger for mmio
+ * flips on gmch platforms, a flip count check isn't
+ * really needed there. But since ctg has the registers,
+ * include it in the check anyway.
+ */
+ if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ return true;
+
+ /*
+ * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
+ * used the same base address. In that case the mmio flip might
+ * have completed, but the CS hasn't even executed the flip yet.
+ *
+ * A flip count check isn't enough as the CS might have updated
+ * the base address just after start of vblank, but before we
+ * managed to process the interrupt. This means we'd complete the
+ * CS flip too soon.
+ *
+ * Combining both checks should get us a good enough result. It may
+ * still happen that the CS flip has been executed, but has not
+ * yet actually completed. But in case the base address is the same
+ * anyway, we don't really care.
+ */
+ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
+ crtc->unpin_work->gtt_offset &&
+ g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
+ crtc->unpin_work->flip_count);
+}
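
The wrap-safe comparison above relies on two's-complement subtraction: 'a' counts as after-or-equal-to 'b' whenever (a - b), taken modulo 2^32, is below 2^31. A minimal stand-alone sketch exercising the same predicate (plain user-space C, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Same predicate as g4x_flip_count_after_eq(): ordering is preserved
     * across counter wrap-around as long as the two values are less than
     * 2^31 apart. */
    static int after_eq(uint32_t a, uint32_t b)
    {
        return !((a - b) & 0x80000000);
    }

    int main(void)
    {
        assert(after_eq(5, 5));
        assert(after_eq(6, 5));
        assert(!after_eq(5, 6));
        /* still correct across the 32-bit wrap */
        assert(after_eq(1, 0xfffffffeu));
        assert(!after_eq(0xfffffffeu, 1));
        return 0;
    }
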
+
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8599,12 +8951,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
* is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
+ if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
/* Ensure that the work item is consistent when activating it ... */
smp_wmb();
@@ -8617,21 +8969,16 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 6);
if (ret)
- goto err_unpin;
+ return ret;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -8645,38 +8992,28 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 6);
if (ret)
- goto err_unpin;
+ return ret;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
@@ -8687,38 +9024,29 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err_unpin;
+ return ret;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
@@ -8727,8 +9055,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring,
- (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -8742,37 +9069,28 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err_unpin;
+ return ret;
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -8787,34 +9105,20 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
int len, ret;
- ring = obj->ring;
- if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
- ring = &dev_priv->ring[BCS];
-
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
- if (ret)
- goto err;
-
- switch(intel_crtc->plane) {
+ switch (intel_crtc->plane) {
case PLANE_A:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
break;
@@ -8826,13 +9130,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
break;
default:
WARN_ONCE(1, "unknown plane in flip command\n");
- ret = -ENODEV;
- goto err_unpin;
+ return -ENODEV;
}
len = 4;
- if (ring->id == RCS)
+ if (ring->id == RCS) {
len += 6;
+ /*
+	 * On Gen 8, SRM now takes an extra dword to accommodate
+	 * 48-bit addresses, and we need a NOOP for the batch size to
+	 * stay even.
+ */
+ if (IS_GEN8(dev))
+ len += 2;
+ }
/*
* BSpec MI_DISPLAY_FLIP for IVB:
@@ -8846,11 +9157,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
*/
ret = intel_ring_cacheline_align(ring);
if (ret)
- goto err_unpin;
+ return ret;
ret = intel_ring_begin(ring, len);
if (ret)
- goto err_unpin;
+ return ret;
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
@@ -8867,31 +9178,35 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
- MI_SRM_LRM_GLOBAL_GTT);
+ if (IS_GEN8(dev))
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
+ else
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ if (IS_GEN8(dev)) {
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ }
}
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
-
-err_unpin:
- intel_unpin_fb_obj(obj);
-err:
- return ret;
}
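
The dword accounting earlier in intel_gen7_queue_flip() works out to: 4 dwords for the flip itself, plus 6 on RCS for the DERRMR load/store pair, plus 2 more on gen8 where MI_STORE_REGISTER_MEM carries a second address dword and the batch must stay an even number of dwords. Restated as a self-contained function mirroring only that arithmetic:

    #include <assert.h>

    static int flip_cmd_dwords(int on_rcs, int gen8)
    {
        int len = 4;                 /* MI_DISPLAY_FLIP + pitch + base + NOOP */

        if (on_rcs) {
            len += 6;                /* LRI(DERRMR) + SRM of DERRMR to scratch */
            if (gen8)
                len += 2;            /* extra SRM address dword + pad NOOP */
        }
        return len;
    }

    int main(void)
    {
        assert(flip_cmd_dwords(0, 0) == 4);
        assert(flip_cmd_dwords(1, 0) == 10);
        assert(flip_cmd_dwords(1, 1) == 12);
        return 0;
    }
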
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
uint32_t flags)
{
return -ENODEV;
@@ -8908,6 +9223,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
+ struct intel_engine_cs *ring;
unsigned long flags;
int ret;
@@ -8936,7 +9252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
- ret = drm_vblank_get(dev, intel_crtc->pipe);
+ ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
@@ -8945,7 +9261,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- drm_vblank_put(dev, intel_crtc->pipe);
+ drm_crtc_vblank_put(crtc);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -8973,10 +9289,30 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+
+ if (IS_VALLEYVIEW(dev)) {
+ ring = &dev_priv->ring[BCS];
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ ring = obj->ring;
+ if (ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
+ } else {
+ ring = &dev_priv->ring[RCS];
+ }
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto cleanup_pending;
+ work->gtt_offset =
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (ret)
+ goto cleanup_unpin;
+
intel_disable_fbc(dev);
intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
@@ -8985,6 +9321,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
+cleanup_unpin:
+ intel_unpin_fb_obj(obj);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
crtc->primary->fb = old_fb;
@@ -8997,7 +9335,7 @@ cleanup:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_vblank_put(dev, intel_crtc->pipe);
+ drm_crtc_vblank_put(crtc);
free_work:
kfree(work);
@@ -9040,8 +9378,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
to_intel_crtc(encoder->base.crtc);
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = crtc->base.enabled;
if (crtc->new_enabled)
@@ -9072,21 +9409,20 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
encoder->base.crtc = &encoder->new_crtc->base;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->base.enabled = crtc->new_enabled;
}
}
static void
-connected_sink_compute_bpp(struct intel_connector * connector,
+connected_sink_compute_bpp(struct intel_connector *connector,
struct intel_crtc_config *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base));
+ connector->base.name);
/* Don't use an invalid EDID bpc value */
if (connector->base.display_info.bpc &&
@@ -9427,8 +9763,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
}
/* Check for pipes that will be enabled/disabled ... */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->base.enabled == intel_crtc->new_enabled)
continue;
@@ -9501,8 +9836,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
intel_modeset_commit_output_state(dev);
/* Double check state. */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != &intel_crtc->config);
@@ -9631,6 +9965,12 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(has_hdmi_sink);
+ if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
+ IS_VALLEYVIEW(dev))
+ PIPE_CONF_CHECK_I(limited_color_range);
+
+ PIPE_CONF_CHECK_I(has_audio);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -9728,7 +10068,7 @@ check_encoder_state(struct drm_device *dev)
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base));
+ encoder->base.name);
WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
@@ -9780,8 +10120,7 @@ check_crtc_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_crtc_config pipe_config;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
bool enabled = false;
bool active = false;
@@ -9870,8 +10209,7 @@ check_shared_dpll_state(struct drm_device *dev)
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
@@ -9911,6 +10249,44 @@ void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config
pipe_config->adjusted_mode.crtc_clock, dotclock);
}
+static void update_scanline_offset(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. I.e. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ */
+ if (IS_GEN2(dev)) {
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ int vtotal;
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ crtc->scanline_offset = vtotal - 1;
+ } else if (HAS_DDI(dev) &&
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+ crtc->scanline_offset = 2;
+ } else
+ crtc->scanline_offset = 1;
+}
+
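For context, the offset computed above is meant to be added back to a raw hardware scanline counter reading to recover the logical scanline, wrapping at vtotal; that is why gen2 uses vtotal - 1 rather than subtracting one. A minimal hedged sketch of that conversion (the function name and standalone form are illustrative, not the driver's actual helper):

    /* Hedged sketch: apply the scanline offset to a raw PIPEDSL-style
     * reading to recover the logical scanline, wrapping at vtotal. */
    static int logical_scanline(int hw_scanline, int vtotal, int scanline_offset)
    {
            return (hw_scanline + scanline_offset) % vtotal;
    }

With scanline_offset = vtotal - 1 this is equivalent to subtracting one modulo vtotal, while keeping the intermediate value positive.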
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
@@ -10002,15 +10378,38 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- ret = intel_crtc_mode_set(&intel_crtc->base,
- x, y, fb);
+ struct drm_framebuffer *old_fb;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(fb)->obj,
+ NULL);
+ if (ret != 0) {
+ DRM_ERROR("pin & fence failed\n");
+ mutex_unlock(&dev->struct_mutex);
+ goto done;
+ }
+ old_fb = crtc->primary->fb;
+ if (old_fb)
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ crtc->primary->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
+ ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
+ x, y, fb);
if (ret)
goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ update_scanline_offset(intel_crtc);
+
dev_priv->display.crtc_enable(&intel_crtc->base);
+ }
/* FIXME: add subpixel order */
done:
@@ -10086,7 +10485,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
* restored, not the drivers personal bookkeeping.
*/
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
config->save_crtc_enabled[count++] = crtc->enabled;
}
@@ -10112,7 +10511,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
int count;
count = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = config->save_crtc_enabled[count++];
if (crtc->new_enabled)
@@ -10236,7 +10635,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base));
+ connector->base.name);
}
@@ -10271,7 +10670,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base),
+ connector->base.name,
new_crtc->base.id);
}
@@ -10302,8 +10701,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Now we've also updated encoder->new_crtc for all encoders. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
list_for_each_entry(encoder,
@@ -10516,7 +10914,7 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
/* Make sure no transcoder is still depending on us. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (intel_crtc_to_shared_dpll(crtc) == pll)
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
@@ -10573,16 +10971,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
- if (IS_GEN2(dev)) {
- intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
- intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
- } else {
- intel_crtc->max_cursor_width = CURSOR_WIDTH;
- intel_crtc->max_cursor_height = CURSOR_HEIGHT;
- }
- dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
- dev->mode_config.cursor_height = intel_crtc->max_cursor_height;
-
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
@@ -10601,19 +10989,27 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->plane = !pipe;
}
+ intel_crtc->cursor_base = ~0;
+ intel_crtc->cursor_cntl = ~0;
+
+ init_waitqueue_head(&intel_crtc->vbl_wait);
+
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+ WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
struct drm_encoder *encoder = connector->base.encoder;
+ struct drm_device *dev = connector->base.dev;
- WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!encoder)
return INVALID_PIPE;
@@ -10701,6 +11097,22 @@ const char *intel_output_name(int output)
return names[output];
}
+static bool intel_crt_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_ULT(dev))
+ return false;
+
+ if (IS_CHERRYVIEW(dev))
+ return false;
+
+ if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+ return false;
+
+ return true;
+}
+
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10709,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_lvds_init(dev);
- if (!IS_ULT(dev))
+ if (intel_crt_present(dev))
intel_crt_init(dev);
if (HAS_DDI(dev)) {
@@ -10773,6 +11185,15 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
+ if (IS_CHERRYVIEW(dev)) {
+ if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
+ PORT_D);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
+ }
+ }
+
intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -11002,6 +11423,8 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
dev_priv->display.find_dpll = g4x_find_best_dpll;
+ else if (IS_CHERRYVIEW(dev))
+ dev_priv->display.find_dpll = chv_find_best_dpll;
else if (IS_VALLEYVIEW(dev))
dev_priv->display.find_dpll = vlv_find_best_dpll;
else if (IS_PINEVIEW(dev))
@@ -11083,6 +11506,8 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_GEN6(dev)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ snb_modeset_global_resources;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
@@ -11166,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/* Some VBTs incorrectly indicate that no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -11211,9 +11644,6 @@ static struct intel_quirk intel_quirks[] = {
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
- /* 830 needs to leave pipe A & dpll A up */
- { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
-
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -11237,6 +11667,12 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5336 */
{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -11287,9 +11723,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_reset_dpio(dev);
- mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
- mutex_unlock(&dev->struct_mutex);
}
void intel_modeset_suspend_hw(struct drm_device *dev)
@@ -11333,6 +11767,15 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
+
+ if (IS_GEN2(dev)) {
+ dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
+ dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+ } else {
+ dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
+ dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+ }
+
dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
@@ -11362,12 +11805,11 @@ void intel_modeset_init(struct drm_device *dev)
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (!crtc->active)
continue;
@@ -11395,6 +11837,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
+ struct drm_modeset_acquire_ctx ctx;
/* We can't just switch on pipe A; we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
@@ -11411,8 +11854,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
if (!crt)
return;
- if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
- intel_release_load_detect_pipe(crt, &load_detect_temp);
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
+ intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
}
@@ -11447,6 +11890,12 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
reg = PIPECONF(crtc->config.cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ /* restore vblank interrupts to correct state */
+ if (crtc->active)
+ drm_vblank_on(dev, crtc->pipe);
+ else
+ drm_vblank_off(dev, crtc->pipe);
+
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
@@ -11525,16 +11974,25 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
encoder->base.crtc = NULL;
}
}
- if (crtc->active) {
+
+ if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
*
+ * Also, on gmch platforms we don't have any hardware bits to
+ * disable the underrun reporting, which means we need to start
+ * out with underrun reporting disabled on inactive pipes too,
+ * since otherwise we'll complain about the garbage we read when
+ * e.g. coming up after runtime pm.
+ *
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
crtc->cpu_fifo_underrun_disabled = true;
crtc->pch_fifo_underrun_disabled = true;
+
+ update_scanline_offset(crtc);
}
}
@@ -11552,7 +12010,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (encoder->connectors_active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base));
+ encoder->base.name);
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
@@ -11560,7 +12018,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (encoder->base.crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base));
+ encoder->base.name);
encoder->disable(encoder);
}
encoder->base.crtc = NULL;
@@ -11611,6 +12069,16 @@ void i915_redisable_vga(struct drm_device *dev)
i915_redisable_vga_power_on(dev);
}
+static bool primary_get_hw_state(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!crtc->active)
+ return false;
+
+ return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+}
+
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11620,8 +12088,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_connector *connector;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
memset(&crtc->config, 0, sizeof(crtc->config));
crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
@@ -11630,7 +12097,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
&crtc->config);
crtc->base.enabled = crtc->active;
- crtc->primary_enabled = crtc->active;
+ crtc->primary_enabled = primary_get_hw_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
@@ -11646,8 +12113,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
pll->active = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
pll->active++;
}
@@ -11672,7 +12138,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
- drm_get_encoder_name(&encoder->base),
+ encoder->base.name,
encoder->base.crtc ? "enabled" : "disabled",
pipe_name(pipe));
}
@@ -11689,7 +12155,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id,
- drm_get_connector_name(&connector->base),
+ connector->base.name,
connector->base.encoder ? "enabled" : "disabled");
}
}
@@ -11712,8 +12178,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
* Note that this could go away if we move to using crtc_config
* checking everywhere.
*/
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- base.head) {
+ for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
@@ -11789,7 +12254,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
* for this.
*/
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, c) {
if (!c->primary->fb)
continue;
@@ -11835,7 +12300,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
/* Skip inactive CRTCs */
if (!crtc->primary->fb)
continue;
@@ -11933,6 +12398,7 @@ struct intel_display_error_state {
struct intel_pipe_error_state {
bool power_domain_on;
u32 source;
+ u32 stat;
} pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
@@ -11985,20 +12451,14 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_sw(dev_priv,
- POWER_DOMAIN_PIPE(i));
+ intel_display_power_enabled_unlocked(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
- if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
- error->cursor[i].control = I915_READ(CURCNTR(i));
- error->cursor[i].position = I915_READ(CURPOS(i));
- error->cursor[i].base = I915_READ(CURBASE(i));
- } else {
- error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
- error->cursor[i].position = I915_READ(CURPOS_IVB(i));
- error->cursor[i].base = I915_READ(CURBASE_IVB(i));
- }
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
@@ -12014,6 +12474,9 @@ intel_display_capture_error_state(struct drm_device *dev)
}
error->pipe[i].source = I915_READ(PIPESRC(i));
+
+ if (!HAS_PCH_SPLIT(dev))
+ error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
error->num_transcoders = INTEL_INFO(dev)->num_pipes;
@@ -12024,7 +12487,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_sw(dev_priv,
+ intel_display_power_enabled_unlocked(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
@@ -12064,6 +12527,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " Power: %s\n",
error->pipe[i].power_domain_on ? "on" : "off");
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2a00cb828d20..075170d1844f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -64,6 +66,24 @@ static const struct dp_link_dpll vlv_dpll[] = {
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
+/*
+ * CHV supports eDP 1.4, which allows more link rates.
+ * Below we provide only the fixed rates, excluding the variable rates.
+ */
+static const struct dp_link_dpll chv_dpll[] = {
+ /*
+ * CHV requires programming fractional division for m2.
+ * m2 is stored in fixed-point format using the formula below:
+ * (m2_int << 22) | m2_fraction
+ */
+ { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
+ { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+ { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+ { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
+};
+
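To make the fixed-point encoding concrete, here is a small hedged sketch (a standalone user-space check, not driver code) showing that the 0x819999a value in the DP_LINK_BW_1_62 entry follows from m2_int = 32 and m2_fraction = 1677722 via the formula in the comment:

    #include <stdio.h>

    /* Encode m2 in the CHV fixed-point format: (m2_int << 22) | m2_fraction. */
    static unsigned int chv_encode_m2(unsigned int m2_int, unsigned int m2_fraction)
    {
            return (m2_int << 22) | m2_fraction;
    }

    int main(void)
    {
            /* Prints 0x819999a, matching the table entry above. */
            printf("0x%x\n", chv_encode_m2(32, 1677722));
            return 0;
    }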
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -318,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
+/* Reboot notifier handler to shut down panel power, guaranteeing T12 timing.
+ * This function is only applicable when the panel PM state is not tracked. */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
+ edp_notifier);
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_div;
+ u32 pp_ctrl_reg, pp_div_reg;
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ if (!is_edp(intel_dp) || code != SYS_RESTART)
+ return 0;
+
+ if (IS_VALLEYVIEW(dev)) {
+ pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
+
+ return 0;
+}
+
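For readers unfamiliar with the reboot-notifier API used above, registration is just a notifier_block whose .notifier_call matches this handler's signature. A minimal hedged sketch (the names are illustrative; the actual registration for this driver happens in intel_edp_init_connector further down):

    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int example_reboot_handler(struct notifier_block *nb,
                                      unsigned long code, void *unused)
    {
            if (code != SYS_RESTART)        /* mirror the check above */
                    return NOTIFY_DONE;
            /* ... perform the orderly power-off work here ... */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_reboot_nb = {
            .notifier_call = example_reboot_handler,
    };

    /* register_reboot_notifier(&example_reboot_nb) at init time;
     * unregister_reboot_notifier(&example_reboot_nb) on teardown. */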
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -330,8 +381,12 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ enum intel_display_power_domain power_domain;
- return !dev_priv->pm.suspended &&
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ return intel_display_power_enabled(dev_priv, power_domain) &&
(I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
@@ -697,9 +752,9 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
DRM_DEBUG_KMS("registering %s bus for %s\n", name,
connector->base.kdev->kobj.name);
- ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);
+ ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
- DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n",
+ DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
name, ret);
return;
}
@@ -709,7 +764,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
intel_dp->aux.ddc.dev.kobj.name);
if (ret < 0) {
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
- drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
}
}
@@ -739,6 +794,9 @@ intel_dp_set_clock(struct intel_encoder *encoder,
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
+ } else if (IS_CHERRYVIEW(dev)) {
+ divisor = chv_dpll;
+ count = ARRAY_SIZE(chv_dpll);
} else if (IS_VALLEYVIEW(dev)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
@@ -755,6 +813,20 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
+static void
+intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder transcoder = crtc->config.cpu_transcoder;
+
+ I915_WRITE(PIPE_DATA_M2(transcoder),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
+}
+
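The M2/N2 registers written here carry the same kind of ratio as the primary dp_m_n pair: data M/N tracks (pixel clock x bits per pixel) against (link clock x lane count x 8), just evaluated for the downclock mode. A hedged sketch of the raw ratio (the driver's intel_link_compute_m_n() additionally reduces this against a fixed N and fills in the TU size):

    #include <stdint.h>

    /* Raw DP data M/N ratio; the real helper scales this against a fixed N. */
    static void dp_data_m_n(unsigned int bpp, unsigned int nlanes,
                            unsigned int pixel_clock_khz, unsigned int link_clock_khz,
                            uint64_t *m, uint64_t *n)
    {
            *m = (uint64_t)pixel_clock_khz * bpp;
            *n = (uint64_t)link_clock_khz * nlanes * 8;
    }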
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@@ -780,6 +852,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_dp_encoder = true;
+ pipe_config->has_audio = intel_dp->has_audio;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -880,6 +953,14 @@ found:
pipe_config->port_clock,
&pipe_config->dp_m_n);
+ if (intel_connector->panel.downclock_mode != NULL &&
+ intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+ intel_link_compute_m_n(bpp, lane_count,
+ intel_connector->panel.downclock_mode->clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m2_n2);
+ }
+
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
return true;
@@ -915,7 +996,7 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
udelay(500);
}
-static void intel_dp_mode_set(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -950,7 +1031,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (intel_dp->has_audio) {
+ if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
@@ -983,14 +1064,15 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (crtc->pipe == 1)
- intel_dp->DP |= DP_PIPEB_SELECT;
+ if (!IS_CHERRYVIEW(dev)) {
+ if (crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+ } else {
+ intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
+ }
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
-
- if (port == PORT_A && !IS_VALLEYVIEW(dev))
- ironlake_set_pll_cpu_edp(intel_dp);
}
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1082,7 +1164,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -1095,7 +1180,8 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- intel_runtime_pm_get(dev_priv);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
DRM_DEBUG_KMS("Turning eDP VDD on\n");
@@ -1139,9 +1225,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
+ struct intel_digital_port *intel_dig_port =
+ dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ enum intel_display_power_domain power_domain;
+
DRM_DEBUG_KMS("Turning eDP VDD off\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1160,7 +1251,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & POWER_TARGET_ON) == 0)
intel_dp->last_power_cycle = jiffies;
- intel_runtime_pm_put(dev_priv);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_put(dev_priv, power_domain);
}
}
@@ -1170,9 +1262,9 @@ static void edp_panel_vdd_work(struct work_struct *__work)
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -1244,8 +1336,11 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_ctrl_reg;
@@ -1275,7 +1370,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
- intel_runtime_pm_put(dev_priv);
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_put(dev_priv, power_domain);
}
void intel_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1432,6 +1528,8 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
@@ -1479,8 +1577,11 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int dotclock;
+ tmp = I915_READ(intel_dp->output_reg);
+ if (tmp & DP_AUDIO_OUTPUT_ENABLE)
+ pipe_config->has_audio = true;
+
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
- tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -1816,17 +1917,59 @@ static void intel_disable_dp(struct intel_encoder *encoder)
intel_dp_link_down(intel_dp);
}
-static void intel_post_disable_dp(struct intel_encoder *encoder)
+static void g4x_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
+
+ if (port != PORT_A)
+ return;
+
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+}
+
+static void vlv_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_dp_link_down(intel_dp);
+}
+
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
- if (port == PORT_A || IS_VALLEYVIEW(dev)) {
- intel_dp_link_down(intel_dp);
- if (!IS_VALLEYVIEW(dev))
- ironlake_edp_pll_off(intel_dp);
- }
+ intel_dp_link_down(intel_dp);
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Propagate soft reset to data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_enable_dp(struct intel_encoder *encoder)
@@ -1868,8 +2011,13 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- if (dport->port == PORT_A)
+ intel_dp_prepare(encoder);
+
+ /* Only ilk+ has port A */
+ if (dport->port == PORT_A) {
+ ironlake_set_pll_cpu_edp(intel_dp);
ironlake_edp_pll_on(intel_dp);
+ }
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -1921,6 +2069,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
+ intel_dp_prepare(encoder);
+
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
@@ -1939,6 +2089,69 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq power_seq;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Deassert soft data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ /* Program Tx lane latency optimal setting */
+ for (i = 0; i < 4; i++) {
+ /* Set the latency optimal bit */
+ data = (i == 1) ? 0x0 : 0x6;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+ data << DPIO_FRC_LATENCY_SHFIT);
+
+ /* Set the upar bit */
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ /* FIXME: Fix up value only after power analysis */
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
+
+ intel_enable_dp(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
@@ -2163,6 +2376,166 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
+static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ u32 deemph_reg_value, margin_reg_value, val;
+ uint8_t train_set = intel_dp->train_set[0];
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ int i;
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 128;
+ margin_reg_value = 52;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 128;
+ margin_reg_value = 77;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ deemph_reg_value = 128;
+ margin_reg_value = 102;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ deemph_reg_value = 128;
+ margin_reg_value = 154;
+ /* FIXME extra to set for 1200 */
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 85;
+ margin_reg_value = 78;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 85;
+ margin_reg_value = 116;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ deemph_reg_value = 85;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 64;
+ margin_reg_value = 104;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ deemph_reg_value = 64;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ deemph_reg_value = 43;
+ margin_reg_value = 154;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* Program swing deemph */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~DPIO_SWING_MARGIN_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /* Disable unique transition scale */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
+ == DP_TRAIN_PRE_EMPHASIS_0) &&
+ ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
+ == DP_TRAIN_VOLTAGE_SWING_1200)) {
+
+ /*
+ * The document says to set bit 27 for ch0 and bit 26
+ * for ch1, which might be a typo in the doc.
+ * For now, for this unique transition scale selection, set bit
+ * 27 for ch0 and ch1.
+ */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* LRC Bypass */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ val |= DPIO_LRC_BYPASS;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return 0;
+}
+
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -2377,6 +2750,9 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
} else if (IS_HASWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
+ } else if (IS_CHERRYVIEW(dev)) {
+ signal_levels = intel_chv_signal_levels(intel_dp);
+ mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
@@ -2743,22 +3119,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
- /*
- * DDI code has a strict mode set sequence and we should try to respect
- * it, otherwise we might hang the machine in many different ways. So we
- * really should be disabling the port only on a complete crtc_disable
- * sequence. This function is just called under two conditions on DDI
- * code:
- * - Link train failed while doing crtc_enable, and on this case we
- * really should respect the mode set sequence and wait for a
- * crtc_disable.
- * - Someone turned the monitor off and intel_dp_check_link_status
- * called us. We don't need to disable the whole port on this case, so
- * when someone turns the monitor on again,
- * intel_ddi_prepare_link_retrain will take care of redoing the link
- * train.
- */
- if (HAS_DDI(dev))
+ if (WARN_ON(HAS_DDI(dev)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -2775,9 +3136,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
}
POSTING_READ(intel_dp->output_reg);
- /* We don't really know why we're doing this */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
@@ -2948,6 +3306,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
+ /* FIXME: This access isn't protected by any locks. */
if (!intel_encoder->connectors_active)
return;
@@ -2980,7 +3339,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_encoder->base));
+ intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
@@ -3166,7 +3525,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_display_power_get(dev_priv, power_domain);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
intel_dp->has_audio = false;
@@ -3374,13 +3733,17 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->edp_notifier.notifier_call) {
+ unregister_reboot_notifier(&intel_dp->edp_notifier);
+ intel_dp->edp_notifier.notifier_call = NULL;
+ }
}
kfree(intel_dig_port);
}
@@ -3651,6 +4014,130 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(pp_div_reg));
}
+void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp = NULL;
+ struct intel_crtc_config *config = NULL;
+ struct intel_crtc *intel_crtc = NULL;
+ struct intel_connector *intel_connector = dev_priv->drrs.connector;
+ u32 reg, val;
+ enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
+
+ if (refresh_rate <= 0) {
+ DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+ return;
+ }
+
+ if (intel_connector == NULL) {
+ DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
+ return;
+ }
+
+ if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+ DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
+ return;
+ }
+
+ encoder = intel_attached_encoder(&intel_connector->base);
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_crtc = encoder->new_crtc;
+
+ if (!intel_crtc) {
+ DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+ return;
+ }
+
+ config = &intel_crtc->config;
+
+ if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
+ DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+ return;
+ }
+
+ if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
+ index = DRRS_LOW_RR;
+
+ if (index == intel_dp->drrs_state.refresh_rate_type) {
+ DRM_DEBUG_KMS(
+ "DRRS requested for previously set RR...ignoring\n");
+ return;
+ }
+
+ if (!intel_crtc->active) {
+ DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+ return;
+ }
+
+ if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+ reg = PIPECONF(intel_crtc->config.cpu_transcoder);
+ val = I915_READ(reg);
+ if (index > DRRS_HIGH_RR) {
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
+ intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
+ } else {
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ }
+ I915_WRITE(reg, val);
+ }
+
+ /*
+ * The mutex is taken to ensure that there is no race between different
+ * DRRS calls trying to update the refresh rate. This scenario may occur
+ * in the future, when idleness-detection-based DRRS in the kernel and
+ * possible calls from user space to set a different RR collide.
+ */
+
+ mutex_lock(&intel_dp->drrs_state.mutex);
+
+ intel_dp->drrs_state.refresh_rate_type = index;
+
+ mutex_unlock(&intel_dp->drrs_state.mutex);
+
+ DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
+
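A hedged sketch of how a caller, e.g. the future idleness detector the comment above anticipates, might drive this entry point; fixed_mode and downclock_mode stand for the panel modes set up by intel_dp_drrs_init() below:

    /* Illustrative only: request the panel's low refresh rate when idle,
     * and restore the full rate on activity. */
    intel_dp_set_drrs_state(dev, downclock_mode->vrefresh);   /* going idle  */
    intel_dp_set_drrs_state(dev, fixed_mode->vrefresh);       /* active again */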
+static struct drm_display_mode *
+intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector,
+ struct drm_display_mode *fixed_mode)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *downclock_mode = NULL;
+
+ if (INTEL_INFO(dev)->gen <= 6) {
+ DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+ return NULL;
+ }
+
+ if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+ DRM_INFO("VBT doesn't support DRRS\n");
+ return NULL;
+ }
+
+ downclock_mode = intel_find_panel_downclock
+ (dev, fixed_mode, connector);
+
+ if (!downclock_mode) {
+ DRM_INFO("DRRS not supported\n");
+ return NULL;
+ }
+
+ dev_priv->drrs.connector = intel_connector;
+
+ mutex_init(&intel_dp->drrs_state.mutex);
+
+ intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
+
+ intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
+ DRM_INFO("seamless DRRS supported for eDP panel.\n");
+ return downclock_mode;
+}
+
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
@@ -3661,10 +4148,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
+ intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
+
if (!is_edp(intel_dp))
return true;
@@ -3715,6 +4205,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
+ downclock_mode = intel_dp_drrs_init(
+ intel_dig_port,
+ intel_connector, fixed_mode);
break;
}
}
@@ -3728,7 +4221,12 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
- intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ if (IS_VALLEYVIEW(dev)) {
+ intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+ register_reboot_notifier(&intel_dp->edp_notifier);
+ }
+
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
return true;
@@ -3826,12 +4324,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->psr_setup_done = false;
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
- drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -3877,25 +4375,36 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
- intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_enable = chv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->post_disable = chv_post_disable_dp;
+ } else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->post_disable = vlv_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
}
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ if (IS_CHERRYVIEW(dev)) {
+ if (port == PORT_D)
+ intel_encoder->crtc_mask = 1 << 2;
+ else
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ } else {
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ }
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 328b1a70264b..f67340ed2c12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -81,8 +81,8 @@
/* Maximum cursor sizes */
#define GEN2_CURSOR_WIDTH 64
#define GEN2_CURSOR_HEIGHT 64
-#define CURSOR_WIDTH 256
-#define CURSOR_HEIGHT 256
+#define MAX_CURSOR_WIDTH 256
+#define MAX_CURSOR_HEIGHT 256
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -106,8 +106,8 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-#define INTEL_DSI_COMMAND_MODE 0
-#define INTEL_DSI_VIDEO_MODE 1
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
struct intel_framebuffer {
struct drm_framebuffer base;
@@ -273,6 +273,13 @@ struct intel_crtc_config {
* accordingly. */
bool has_dp_encoder;
+ /* Whether we should send NULL infoframes. Required for audio. */
+ bool has_hdmi_sink;
+
+ /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
+ * has_dp_encoder is set. */
+ bool has_audio;
+
/*
* Enable dithering, used when the selected pipe bpp doesn't match the
* plane bpp.
@@ -306,6 +313,9 @@ struct intel_crtc_config {
int pipe_bpp;
struct intel_link_m_n dp_m_n;
+ /* m2_n2 for eDP downclock */
+ struct intel_link_m_n dp_m2_n2;
+
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -343,6 +353,9 @@ struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
+ bool pipe_enabled;
+ bool sprites_enabled;
+ bool sprites_scaled;
};
struct intel_crtc {
@@ -357,7 +370,6 @@ struct intel_crtc {
*/
bool active;
unsigned long enabled_power_domains;
- bool eld_vld;
bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
@@ -374,8 +386,8 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- int16_t max_cursor_width, max_cursor_height;
- bool cursor_visible;
+ uint32_t cursor_cntl;
+ uint32_t cursor_base;
struct intel_plane_config plane_config;
struct intel_crtc_config config;
@@ -396,6 +408,10 @@ struct intel_crtc {
/* watermarks currently being used */
struct intel_pipe_wm active;
} wm;
+
+ wait_queue_head_t vbl_wait;
+
+ int scanline_offset;
};
struct intel_plane_wm_parameters {
@@ -479,11 +495,23 @@ struct intel_hdmi {
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode);
};
#define DP_MAX_DOWNSTREAM_PORTS 0x10
+/**
+ * HIGH_RR is the highest eDP panel refresh rate read from EDID.
+ * LOW_RR is the lowest eDP panel refresh rate found from EDID
+ * parsing for the same resolution.
+ */
+enum edp_drrs_refresh_rate_type {
+ DRRS_HIGH_RR,
+ DRRS_LOW_RR,
+ DRRS_MAX_RR, /* RR count */
+};
+
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
@@ -510,6 +538,8 @@ struct intel_dp {
unsigned long last_power_on;
unsigned long last_backlight_off;
bool psr_setup_done;
+ struct notifier_block edp_notifier;
+
bool use_tps3;
struct intel_connector *attached_connector;
@@ -522,6 +552,12 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
+ struct {
+ enum drrs_support_type type;
+ enum edp_drrs_refresh_rate_type refresh_rate_type;
+ struct mutex mutex;
+ } drrs_state;
+
};
struct intel_digital_port {
@@ -537,6 +573,7 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
+ case PORT_D:
return DPIO_CH0;
case PORT_C:
return DPIO_CH1;
@@ -545,6 +582,20 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
}
}
+static inline int
+vlv_pipe_to_channel(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ case PIPE_C:
+ return DPIO_CH0;
+ case PIPE_B:
+ return DPIO_CH1;
+ default:
+ BUG();
+ }
+}
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -569,6 +620,8 @@ struct intel_unpin_work {
#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING 1
#define INTEL_FLIP_COMPLETE 2
+ u32 flip_count;
+ u32 gtt_offset;
bool enable_stall_check;
};
@@ -620,8 +673,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
@@ -629,8 +680,12 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void hsw_runtime_pm_disable_interrupts(struct drm_device *dev);
-void hsw_runtime_pm_restore_interrupts(struct drm_device *dev);
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
+void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void i9xx_check_fifo_underruns(struct drm_device *dev);
/* intel_crt.c */
@@ -666,9 +721,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
+int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
+ struct intel_engine_cs *ring);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -695,12 +751,14 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old);
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+ struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
@@ -751,6 +809,8 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
+
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -774,7 +834,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
-
+void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
/* intel_dsi.c */
bool intel_dsi_init(struct drm_device *dev);
@@ -876,6 +936,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
+int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
@@ -891,8 +952,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
@@ -902,6 +963,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
@@ -909,11 +971,13 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
-
+void __vlv_set_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id, bool enable);
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 33656647f8bc..3fd082933c87 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -35,6 +35,11 @@
/* the sub-encoders aka panel drivers */
static const struct intel_dsi_device intel_dsi_devices[] = {
+ {
+ .panel_id = MIPI_DSI_GENERIC_PANEL_ID,
+ .name = "vbt-generic-dsi-vid-mode-display",
+ .dev_ops = &vbt_generic_dsi_display_ops,
+ },
};
static void band_gap_reset(struct drm_i915_private *dev_priv)
@@ -59,12 +64,12 @@ static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
- return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
}
static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
- return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
static void intel_dsi_hot_plug(struct intel_encoder *encoder)
@@ -94,13 +99,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
-{
- DRM_DEBUG_KMS("\n");
-
- vlv_enable_dsi_pll(encoder);
-}
-
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -110,32 +108,27 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ mutex_lock(&dev_priv->dpio_lock);
+ /* program rcomp for compliance; reduce from 50 ohms to 45 ohms,
+ * needed every time after power gating */
+ vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* bandgap reset is needed every time we power gate */
+ band_gap_reset(dev_priv);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
+
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
- usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
- usleep_range(2000, 2500);
-}
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
-{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-
- DRM_DEBUG_KMS("\n");
-
- if (intel_dsi->dev.dev_ops->panel_reset)
- intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
- /* put device in ready state */
- intel_dsi_device_ready(encoder);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
- if (intel_dsi->dev.dev_ops->send_otp_cmds)
- intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+ usleep_range(2500, 3000);
}
static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -153,18 +146,78 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
else {
msleep(20); /* XXX */
- dpi_send_cmd(intel_dsi, TURN_ON);
+ dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
msleep(100);
+ if (intel_dsi->dev.dev_ops->enable)
+ intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* Disable DPOunit clock gating; it can stall the pipe,
+ * and we need DPLL REFA always enabled */
+ tmp = I915_READ(DPLL(pipe));
+ tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+ I915_WRITE(DPLL(pipe), tmp);
+
+ tmp = I915_READ(DSPCLK_GATE_D);
+ tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, tmp);
+
+ /* put device in ready state */
+ intel_dsi_device_ready(encoder);
+
+ msleep(intel_dsi->panel_on_delay);
+
+ if (intel_dsi->dev.dev_ops->panel_reset)
+ intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+ if (intel_dsi->dev.dev_ops->send_otp_cmds)
+ intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
+
+ /* Enable port in the pre-enable phase itself because, as per hw team
+ * recommendation, the port should be enabled before plane & pipe */
+ intel_dsi_enable(encoder);
+}
+
+static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ /* For DSI, port enable has to be done before pipe
+ * and plane enable, so unlike other encoders the
+ * port is enabled in the pre_enable phase itself
+ */
+}
+
+static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ DRM_DEBUG_KMS("\n");
- if (intel_dsi->dev.dev_ops->enable)
- intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ if (is_vid_mode(intel_dsi)) {
+ /* Send Shutdown command to the panel in LP mode */
+ dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
+ msleep(10);
+ }
}
static void intel_dsi_disable(struct intel_encoder *encoder)
@@ -179,9 +232,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
- dpi_send_cmd(intel_dsi, SHUTDOWN);
- msleep(10);
-
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
@@ -190,6 +240,23 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
msleep(2);
}
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
+
+ temp = I915_READ(MIPI_CTRL(pipe));
+ temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ I915_WRITE(MIPI_CTRL(pipe), temp |
+ intel_dsi->escape_clk_div <<
+ ESCAPE_CLOCK_DIVIDER_SHIFT);
+
+ I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
+
+ temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
+ temp &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
+
/* if disable packets are sent before the shutdown packet, then a
* "send turn on packet" error is observed in some subsequent enable sequence */
if (intel_dsi->dev.dev_ops->disable)
@@ -205,38 +272,50 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- val = I915_READ(MIPI_PORT_CTRL(pipe));
- I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
-
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
vlv_disable_dsi_pll(encoder);
}
+
static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 val;
DRM_DEBUG_KMS("\n");
+ intel_dsi_disable(encoder);
+
intel_dsi_clear_device_ready(encoder);
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+
if (intel_dsi->dev.dev_ops->disable_panel_power)
intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+
+ msleep(intel_dsi->panel_off_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -364,7 +443,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
}
-static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
@@ -379,9 +458,6 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
- /* XXX: Location of the call */
- band_gap_reset(dev_priv);
-
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
@@ -452,10 +528,20 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
/* dphy stuff */
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+ I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
+
+ val = 0;
+ if (intel_dsi->eotp_pkt == 0)
+ val |= EOT_DISABLE;
+
+ if (intel_dsi->clock_stop)
+ val |= CLOCKSTOP;
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+ I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
+
+ /* in terms of low power clock */
+ I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
@@ -484,9 +570,23 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
+ /* Some panels might have a resolution which is not a multiple of
+ * 64, like 1366 x 768. Enable RANDOM resolution support for such
+ * panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format);
+ intel_dsi->video_mode_format |
+ IP_TG_CONFIG |
+ RANDOM_DPI_DISPLAY_RESOLUTION);
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi_prepare(encoder);
+
+ vlv_enable_dsi_pll(encoder);
}
static enum drm_connector_status
@@ -566,11 +666,16 @@ bool intel_dsi_init(struct drm_device *dev)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_dsi_device *dsi;
unsigned int i;
DRM_DEBUG_KMS("\n");
+ /* There is no detection method for MIPI so we rely on the VBT */
+ if (!dev_priv->vbt.has_mipi)
+ return false;
+
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
return false;
@@ -585,6 +690,13 @@ bool intel_dsi_init(struct drm_device *dev)
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+ DRM_ERROR("Unsupported Mipi device to reg base");
+ return false;
+ }
+
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -594,9 +706,8 @@ bool intel_dsi_init(struct drm_device *dev)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
- intel_encoder->enable = intel_dsi_enable;
- intel_encoder->mode_set = intel_dsi_mode_set;
- intel_encoder->disable = intel_dsi_disable;
+ intel_encoder->enable = intel_dsi_enable_nop;
+ intel_encoder->disable = intel_dsi_pre_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index b4a27cec882f..31db33d3e5cc 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -31,7 +31,6 @@
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
- int type;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
@@ -85,6 +84,9 @@ struct intel_dsi {
/* virtual channel */
int channel;
+ /* Video mode or command mode */
+ u16 operation_mode;
+
/* number of DSI lanes */
unsigned int lane_count;
@@ -95,8 +97,10 @@ struct intel_dsi {
u32 video_mode_format;
/* eot for MIPI_EOT_DISABLE register */
- u32 eot_disable;
+ u8 eotp_pkt;
+ u8 clock_stop;
+ u8 escape_clk_div;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
@@ -110,6 +114,15 @@ struct intel_dsi {
u16 hs_to_lp_count;
u16 clk_lp_to_hs_count;
u16 clk_hs_to_lp_count;
+
+ u16 init_count;
+
+ /* all delays in ms */
+ u16 backlight_off_delay;
+ u16 backlight_on_delay;
+ u16 panel_on_delay;
+ u16 panel_off_delay;
+ u16 panel_pwr_cycle_delay;
};
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
@@ -120,4 +133,6 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
+
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 7c40f981d2c7..933c86305237 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -389,7 +389,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
*
* XXX: commands with data in MIPI_DPI_DATA?
*/
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
@@ -399,17 +399,11 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
u32 mask;
/* XXX: pipe, hs */
- if (intel_dsi->hs)
+ if (hs)
cmd &= ~DPI_LP_MODE;
else
cmd |= DPI_LP_MODE;
- /* DPI virtual channel?! */
-
- mask = DPI_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
- DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
-
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 54c8a234a2e0..9a18cbfa5460 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -33,6 +33,9 @@
#include "intel_drv.h"
#include "intel_dsi.h"
+#define DPI_LP_MODE_EN false
+#define DPI_HS_MODE_EN true
+
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
@@ -47,7 +50,7 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
new file mode 100644
index 000000000000..21a0d348cedc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include <video/mipi_display.h>
+#include <asm/intel-mid.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+#define MIPI_TRANSFER_MODE_SHIFT 0
+#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
+#define MIPI_PORT_SHIFT 3
+
+#define PREPARE_CNT_MAX 0x3F
+#define EXIT_ZERO_CNT_MAX 0x3F
+#define CLK_ZERO_CNT_MAX 0xFF
+#define TRAIL_CNT_MAX 0x1F
+
+#define NS_KHZ_RATIO 1000000
+
+#define GPI0_NC_0_HV_DDI0_HPD 0x4130
+#define GPIO_NC_0_HV_DDI0_PAD 0x4138
+#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
+#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
+#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
+#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
+#define GPIO_NC_3_PANEL0_VDDEN 0x4140
+#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
+#define GPIO_NC_4_PANEL0_BLKEN 0x4150
+#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
+#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
+#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
+#define GPIO_NC_6_PCONF0 0x4180
+#define GPIO_NC_6_PAD 0x4188
+#define GPIO_NC_7_PCONF0 0x4190
+#define GPIO_NC_7_PAD 0x4198
+#define GPIO_NC_8_PCONF0 0x4170
+#define GPIO_NC_8_PAD 0x4178
+#define GPIO_NC_9_PCONF0 0x4100
+#define GPIO_NC_9_PAD 0x4108
+#define GPIO_NC_10_PCONF0 0x40E0
+#define GPIO_NC_10_PAD 0x40E8
+#define GPIO_NC_11_PCONF0 0x40F0
+#define GPIO_NC_11_PAD 0x40F8
+
+struct gpio_table {
+ u16 function_reg;
+ u16 pad_reg;
+ u8 init;
+};
+
+static struct gpio_table gtable[] = {
+ { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
+ { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
+ { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
+ { GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
+ { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
+ { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
+ { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
+ { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
+ { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
+ { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
+ { GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
+ { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
+};
+
+static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
+{
+ u8 type, byte, mode, vc, port;
+ u16 len;
+
+ byte = *data++;
+ mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
+ vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
+ port = (byte >> MIPI_PORT_SHIFT) & 0x3;
+
+ /* LP or HS mode */
+ intel_dsi->hs = mode;
+
+ /* get packet type and increment the pointer */
+ type = *data++;
+
+ len = *((u16 *) data);
+ data += 2;
+
+ switch (type) {
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ dsi_vc_generic_write_0(intel_dsi, vc);
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ dsi_vc_generic_write_1(intel_dsi, vc, *data);
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
+ break;
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+ break;
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ dsi_vc_generic_write(intel_dsi, vc, data, len);
+ break;
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ dsi_vc_dcs_write_0(intel_dsi, vc, *data);
+ break;
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
+ break;
+ case MIPI_DSI_DCS_READ:
+ DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+ break;
+ case MIPI_DSI_DCS_LONG_WRITE:
+ dsi_vc_dcs_write(intel_dsi, vc, data, len);
+ break;
+ }
+
+ data += len;
+
+ return data;
+}
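
The header byte decoded at the top of mipi_exec_send_packet() packs transfer mode, virtual channel, and port into a single byte. A standalone, userspace-runnable sketch of the same bit layout (the sample byte 0x0B is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define MIPI_TRANSFER_MODE_SHIFT	0
#define MIPI_VIRTUAL_CHANNEL_SHIFT	1
#define MIPI_PORT_SHIFT			3

int main(void)
{
	uint8_t byte = 0x0B; /* illustrative VBT packet header byte */

	/* prints mode=1 (HS), vc=1, port=1 for 0x0B */
	printf("mode=%u vc=%u port=%u\n",
	       (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1,
	       (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3,
	       (byte >> MIPI_PORT_SHIFT) & 0x3);
	return 0;
}
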
+
+static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
+{
+ u32 delay = *((u32 *) data);
+
+ usleep_range(delay, delay + 10);
+ data += 4;
+
+ return data;
+}
+
+static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+{
+ u8 gpio, action;
+ u16 function, pad;
+ u32 val;
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gpio = *data++;
+
+ /* pull up/down */
+ action = *data++;
+
+ function = gtable[gpio].function_reg;
+ pad = gtable[gpio].pad_reg;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ if (!gtable[gpio].init) {
+ /* program the function */
+ /* FIXME: remove constant below */
+ vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
+ gtable[gpio].init = 1;
+ }
+
+ val = 0x4 | action;
+
+ /* pull up/down */
+ vlv_gpio_nc_write(dev_priv, pad, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return data;
+}
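
The gpio index above comes straight from the VBT and is used to index gtable[] without validation; a defensive variant (an assumption, not part of this patch) might bound the lookup first:

	/* Sketch only: reject an out-of-range VBT-supplied index before
	 * the gtable[] lookup; ARRAY_SIZE() is the usual kernel helper. */
	if (gpio >= ARRAY_SIZE(gtable)) {
		DRM_ERROR("invalid gpio index %u in VBT\n", gpio);
		return data;
	}
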
+
+typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
+static const fn_mipi_elem_exec exec_elem[] = {
+ NULL, /* reserved */
+ mipi_exec_send_packet,
+ mipi_exec_delay,
+ mipi_exec_gpio,
+ NULL, /* status read; later */
+};
+
+/*
+ * MIPI Sequence from VBT #53 parsing logic
+ * We have already separated each sequence during BIOS parsing.
+ * The following is a generic execution function for any sequence.
+ */
+
+static const char * const seq_name[] = {
+ "UNDEFINED",
+ "MIPI_SEQ_ASSERT_RESET",
+ "MIPI_SEQ_INIT_OTP",
+ "MIPI_SEQ_DISPLAY_ON",
+ "MIPI_SEQ_DISPLAY_OFF",
+ "MIPI_SEQ_DEASSERT_RESET"
+};
+
+static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
+{
+ u8 *data = sequence;
+ fn_mipi_elem_exec mipi_elem_exec;
+ int index;
+
+ if (!sequence)
+ return;
+
+ DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
+
+ /* go to the first element of the sequence */
+ data++;
+
+ /* parse each byte until we reach the end-of-sequence byte - 0x00 */
+ while (1) {
+ index = *data;
+ mipi_elem_exec = exec_elem[index];
+ if (!mipi_elem_exec) {
+ DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
+ return;
+ }
+
+ /* go to the element payload */
+ data++;
+
+ /* execute the element-specific routine */
+ data = mipi_elem_exec(intel_dsi, data);
+
+ /*
+ * After processing the element, data should point to the
+ * next element or the end of the sequence;
+ * check whether we have reached the end of the sequence
+ */
+ if (*data == 0x00)
+ break;
+ }
+}
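
The sequence blobs consumed by generic_exec_sequence() follow the layout [sequence id][element id][payload]...[0x00]. A standalone walker over a hand-built blob (byte values are illustrative; element 2 is the delay element with a 4-byte payload, matching exec_elem[] above):

#include <stdint.h>
#include <stdio.h>

static const uint8_t sample_seq[] = {
	3,				/* MIPI_SEQ_DISPLAY_ON */
	2, 0x64, 0x00, 0x00, 0x00,	/* element 2 = delay, 100us (le u32) */
	0x00				/* end-of-sequence marker */
};

int main(void)
{
	const uint8_t *data = sample_seq + 1;	/* skip the sequence id */

	while (*data != 0x00) {
		uint8_t elem = *data++;

		printf("element %u\n", elem);
		if (elem == 2)
			data += 4;	/* delay payload is one le u32 */
		else
			break;		/* only the delay element is modeled */
	}
	return 0;
}
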
+
+static bool generic_init(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+ struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ u32 bits_per_pixel = 24;
+ u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+ intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+ intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+ intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+
+ if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
+ bits_per_pixel = 18;
+ else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
+ bits_per_pixel = 16;
+
+ bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
+
+ intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+ intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+ intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+ intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+ intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+ intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+ intel_dsi->init_count = mipi_config->master_init_timer;
+ intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+ intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+
+ switch (intel_dsi->escape_clk_div) {
+ case 0:
+ tlpx_ns = 50;
+ break;
+ case 1:
+ tlpx_ns = 100;
+ break;
+
+ case 2:
+ tlpx_ns = 200;
+ break;
+ default:
+ tlpx_ns = 50;
+ break;
+ }
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ case 2:
+ extra_byte_count = 2;
+ break;
+ case 3:
+ extra_byte_count = 4;
+ break;
+ case 4:
+ default:
+ extra_byte_count = 3;
+ break;
+ }
+
+ /*
+ * ui(s) = 1/f [f in Hz]
+ * ui(ns) = 10^9 / (f*10^6) [f in MHz] -> 10^3/f(MHz)
+ */
+
+ /* in kbps */
+ ui_num = NS_KHZ_RATIO;
+ ui_den = bitrate;
+
+ tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
+ ths_prepare_hszero = mipi_config->ths_prepare_hszero;
+
+ /*
+ * B060
+ * LP byte clock = TLPX/ (8UI)
+ */
+ intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
+
+ /* count values in UI = (ns value) * (bitrate / (2 * 10^6))
+ *
+ * Since txddrclkhs_i is 2xUI, all the count values programmed in
+ * DPHY param register are divided by 2
+ *
+ * prepare count
+ */
+ ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
+
+ /* exit zero count */
+ exit_zero_cnt = DIV_ROUND_UP(
+ (ths_prepare_hszero - ths_prepare_ns) * ui_den,
+ ui_num * 2
+ );
+
+ /*
+ * Exit zero is the unified value of ths_zero and ths_exit;
+ * the minimum value for ths_exit = 110ns
+ * min (exit_zero_cnt * 2) = 110/UI
+ * exit_zero_cnt = 55/UI
+ */
+ if (exit_zero_cnt < (55 * ui_den / ui_num))
+ if ((55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
+
+ /* clk zero count */
+ clk_zero_cnt = DIV_ROUND_UP(
+ (tclk_prepare_clkzero - ths_prepare_ns)
+ * ui_den, 2 * ui_num);
+
+ /* trail count */
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
+
+ if (prepare_cnt > PREPARE_CNT_MAX ||
+ exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
+ clk_zero_cnt > CLK_ZERO_CNT_MAX ||
+ trail_cnt > TRAIL_CNT_MAX)
+ DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
+
+ if (prepare_cnt > PREPARE_CNT_MAX)
+ prepare_cnt = PREPARE_CNT_MAX;
+
+ if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
+ exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+
+ if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
+ clk_zero_cnt = CLK_ZERO_CNT_MAX;
+
+ if (trail_cnt > TRAIL_CNT_MAX)
+ trail_cnt = TRAIL_CNT_MAX;
+
+ /* B080 */
+ intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
+ clk_zero_cnt << 8 | prepare_cnt;
+
+ /*
+ * LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
+ * + 10UI + Extra Byte Count
+ *
+ * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
+ * Extra Byte Count is calculated according to number of lanes.
+ * High Low Switch Count is the Max of LP to HS and
+ * HS to LP switch count
+ *
+ */
+ tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
+
+ /* B044 */
+ /* FIXME:
+ * The comment above does not match the code */
+ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
+ exit_zero_cnt * 2 + 10, 8);
+
+ hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
+
+ intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
+ intel_dsi->hs_to_lp_count += extra_byte_count;
+
+ /* B088 */
+ /* LP -> HS for clock lanes
+ * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
+ * extra byte count
+ * 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
+ * 2(in UI) + extra byte count
+ * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) /
+ * 8 + extra byte count
+ */
+ intel_dsi->clk_lp_to_hs_count =
+ DIV_ROUND_UP(
+ 4 * tlpx_ui + prepare_cnt * 2 +
+ clk_zero_cnt * 2,
+ 8);
+
+ intel_dsi->clk_lp_to_hs_count += extra_byte_count;
+
+ /* HS->LP for Clock Lanes
+ * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
+ * Extra byte count
+ * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
+ * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
+ * Extra byte count
+ */
+ intel_dsi->clk_hs_to_lp_count =
+ DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
+ 8);
+ intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+
+ DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
+ "disabled" : "enabled");
+ DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
+ DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
+ DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
+ DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ DRM_DEBUG_KMS("BTA %s\n",
+ intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
+ "disabled" : "enabled");
+
+ /* Delays in the VBT are in units of 100us, so convert to ms here:
+ * Delay (100us) * 100 / 1000 = Delay / 10 (ms),
+ * e.g. a stored value of 250 becomes 25 ms. */
+ intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
+ intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
+ intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
+ intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
+ intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
+
+ return true;
+}
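
Working the UI arithmetic in generic_init() through one concrete case: for a hypothetical 148500 kHz mode at 24bpp on 4 lanes, the bitrate is 891000 kbps, and with tlpx_ns = 50 (escape_clk_div == 0) the B060 LP byte clock rounds up to 6. A standalone check (all input values are assumptions for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define NS_KHZ_RATIO		1000000

int main(void)
{
	unsigned int mode_clock = 148500;	/* kHz, illustrative */
	unsigned int bits_per_pixel = 24, lane_count = 4;
	unsigned int bitrate = (mode_clock * bits_per_pixel) / lane_count;
	unsigned int ui_num = NS_KHZ_RATIO, ui_den = bitrate;
	unsigned int tlpx_ns = 50;

	/* B060: LP byte clock = TLPX / (8 UI), as in generic_init() */
	printf("lp_byte_clk = %u\n",
	       DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num));	/* 6 */
	return 0;
}
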
+
+static int generic_mode_valid(struct intel_dsi_device *dsi,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static bool generic_mode_fixup(struct intel_dsi_device *dsi,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode) {
+ return true;
+}
+
+static void generic_panel_reset(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_disable_panel_power(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_enable(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static void generic_disable(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
+
+ generic_exec_sequence(intel_dsi, sequence);
+}
+
+static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
+{
+ return connector_status_connected;
+}
+
+static bool generic_get_hw_state(struct intel_dsi_device *dev)
+{
+ return true;
+}
+
+static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
+{
+ struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ return dev_priv->vbt.lfp_lvds_vbt_mode;
+}
+
+static void generic_destroy(struct intel_dsi_device *dsi) { }
+
+/* Callbacks. We might not need them all. */
+struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
+ .init = generic_init,
+ .mode_valid = generic_mode_valid,
+ .mode_fixup = generic_mode_fixup,
+ .panel_reset = generic_panel_reset,
+ .disable_panel_power = generic_disable_panel_power,
+ .send_otp_cmds = generic_send_otp_cmds,
+ .enable = generic_enable,
+ .disable = generic_disable,
+ .detect = generic_detect,
+ .get_hw_state = generic_get_hw_state,
+ .get_modes = generic_get_modes,
+ .destroy = generic_destroy,
+};
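
The B080 packing in generic_init() folds the four DPHY counts into one register word (exit_zero << 24 | trail << 16 | clk_zero << 8 | prepare). A standalone check with illustrative counts, each within the *_CNT_MAX limits defined at the top of this file:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t exit_zero_cnt = 0x18, trail_cnt = 0x0c;	/* illustrative */
	uint32_t clk_zero_cnt = 0x30, prepare_cnt = 0x0e;

	/* same packing as intel_dsi->dphy_reg in generic_init() */
	uint32_t dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
			    clk_zero_cnt << 8 | prepare_cnt;

	printf("dphy_reg = 0x%08x\n", dphy_reg);	/* 0x180c300e */
	return 0;
}
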
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fe3feedfe03..a3631c0a5c28 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -285,7 +285,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_dvo_mode_set(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -343,7 +343,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
@@ -475,7 +475,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
intel_encoder->compute_config = intel_dvo_compute_config;
- intel_encoder->mode_set = intel_dvo_mode_set;
+ intel_encoder->pre_enable = intel_dvo_pre_enable;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f73ba5e6b7a8..088fe9378a4c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -343,15 +343,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
num_connectors_detected++;
if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
- connector->base.id);
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
continue;
}
encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
- DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
- connector->base.id);
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
enabled[i] = false;
continue;
}
@@ -373,16 +373,16 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
}
}
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- fb_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
/* go for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- fb_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
+ connector->name);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@@ -390,7 +390,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- drm_get_connector_name(connector));
+ connector->name);
modes[i] = list_first_entry(&connector->modes,
struct drm_display_mode,
head);
@@ -409,16 +409,20 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
* since the fb helper layer wants a pointer to
* something we own.
*/
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
&to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
- drm_get_connector_name(connector),
+ DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ connector->name,
+ pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
- modes[i]->name);
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
fallback = false;
}
@@ -497,7 +501,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
return false;
/* Find the largest fb */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->primary->fb) {
@@ -521,7 +525,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
/* Now make sure all the pipes will fit into it */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
unsigned int cur_size;
intel_crtc = to_intel_crtc(crtc);
@@ -586,7 +590,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
drm_framebuffer_reference(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active)
@@ -692,11 +696,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
if (!dev_priv->fbdev)
return;
- drm_modeset_lock_all(dev);
-
- ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
-
- drm_modeset_unlock_all(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 157267aa3561..eee2bbec2958 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -418,6 +418,7 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -440,7 +441,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
* either. */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
@@ -471,6 +472,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
}
static void ibx_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -486,7 +488,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
@@ -518,6 +520,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -531,7 +534,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
@@ -554,20 +557,23 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
assert_hdmi_port_disabled(intel_hdmi);
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
@@ -576,9 +582,19 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
return;
}
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
val |= VIDEO_DIP_ENABLE;
- val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
- VIDEO_DIP_ENABLE_GCP);
+ val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
+ VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -589,6 +605,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
+ bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -599,7 +616,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
assert_hdmi_port_disabled(intel_hdmi);
- if (!intel_hdmi->has_hdmi_sink) {
+ if (!enable) {
I915_WRITE(reg, 0);
POSTING_READ(reg);
return;
@@ -616,7 +633,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
-static void intel_hdmi_mode_set(struct intel_encoder *encoder)
+static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -638,27 +655,26 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- /* Required on CPT */
- if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+ if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (intel_hdmi->has_audio) {
+ if (crtc->config.has_audio) {
+ WARN_ON(!crtc->config.has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
- hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(&encoder->base, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else if (IS_CHERRYVIEW(dev))
+ hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
-
- intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -681,6 +697,8 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
+ else if (IS_CHERRYVIEW(dev))
+ *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -707,6 +725,12 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
+ if (tmp & HDMI_MODE_SELECT_HDMI)
+ pipe_config->has_hdmi_sink = true;
+
+ if (tmp & SDVO_AUDIO_ENABLE)
+ pipe_config->has_audio = true;
+
pipe_config->adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@ -729,7 +753,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_hdmi->has_audio)
+ if (intel_crtc->config.has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -883,9 +907,11 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
+ pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
- if (intel_hdmi->has_hdmi_sink &&
+ if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else
@@ -898,13 +924,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
pipe_config->has_pch_encoder = true;
+ if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
+ pipe_config->has_audio = true;
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
hdmi_12bpc_possible(encoder->new_crtc)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
@@ -944,7 +973,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -1104,20 +1133,34 @@ done:
return 0;
}
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+
+ intel_hdmi_prepare(encoder);
+
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+}
+
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
- if (!IS_VALLEYVIEW(dev))
- return;
-
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
@@ -1144,6 +1187,10 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+
intel_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport);
@@ -1159,8 +1206,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
- if (!IS_VALLEYVIEW(dev))
- return;
+ intel_hdmi_prepare(encoder);
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
@@ -1199,6 +1245,152 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Propagate soft reset to data lane reset */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* Deassert soft data lane reset*/
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+ /* Program Tx latency optimal setting */
+ for (i = 0; i < 4; i++) {
+ /* Set the latency optimal bit */
+ data = (i == 1) ? 0x0 : 0x6;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+ data << DPIO_FRC_LATENCY_SHFIT);
+
+ /* Set the upar bit */
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ /* FIXME: Fix up value only after power analysis */
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* FIXME: Program the support xxx V-dB */
+ /* Use 800mV-0dB */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val &= ~DPIO_SWING_MARGIN_MASK;
+ val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /* Disable unique transition scale */
+ for (i = 0; i < 4; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Additional steps for 1200mV-0dB */
+#if 0
+ val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
+ if (ch)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
+ else
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
+ vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
+ (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
+#endif
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+ /* LRC Bypass */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ val |= DPIO_LRC_BYPASS;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_hdmi(encoder);
+
+ vlv_wait_port_ready(dev_priv, dport);
+}
+
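Every DPIO access in this new CHV pre-enable hook has the same read-modify-write shape: read the per-channel register, clear or set a field, write it back, all under dpio_lock. A minimal sketch of that pattern as a hypothetical helper (not part of the driver; it only assumes the vlv_dpio_read/write accessors visible in the hunk above):

static void chv_dpio_rmw(struct drm_i915_private *dev_priv, enum pipe pipe,
			 u32 reg, u32 clear, u32 set)
{
	/* caller holds dev_priv->dpio_lock, as chv_hdmi_pre_enable() does */
	u32 val = vlv_dpio_read(dev_priv, pipe, reg);

	val &= ~clear;
	val |= set;
	vlv_dpio_write(dev_priv, pipe, reg, val);
}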
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
@@ -1259,7 +1451,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ if (IS_CHERRYVIEW(dev))
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
+ else
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_A:
@@ -1329,21 +1524,32 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_hdmi_compute_config;
- intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_enable = chv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = chv_hdmi_post_disable;
+ } else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->enable = intel_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ if (IS_CHERRYVIEW(dev)) {
+ if (port == PORT_D)
+ intel_encoder->crtc_mask = 1 << 2;
+ else
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ } else {
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ }
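crtc_mask is a per-encoder bitmask of the pipes allowed to drive it, bit N standing for pipe N; on Cherryview, HDMI on port D is reachable from pipe C only, hence the lone 1 << 2 above. A hypothetical helper (not in the driver) to make the convention concrete:

static bool encoder_supports_pipe(const struct intel_encoder *encoder,
				  enum pipe pipe)
{
	return (encoder->crtc_mask & (1 << pipe)) != 0;
}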
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1ecf916474a..23126023aeba 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,13 +111,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
- /* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_INFO(dev)->gen < 4) {
- tmp = I915_READ(PFIT_CONTROL);
-
- pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
- }
-
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev_priv->dev))
@@ -126,10 +119,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
-/* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -331,15 +320,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-static void intel_lvds_mode_set(struct intel_encoder *encoder)
-{
- /*
- * We don't do anything here, the LVDS port is fully set up in the pre
- * enable hook - the ordering constraints for enabling the lvds port vs.
- * enabling the display pll are too strict.
- */
-}
-
/**
* Detect the LVDS connection.
*
@@ -354,7 +334,7 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
@@ -953,7 +933,6 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
- intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index acde2945eb8a..4f6b53998d79 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ /*
+ * If the acpi_video interface is not supposed to be used, don't
+ * bother processing backlight level change requests from firmware.
+ */
+ if (!acpi_video_verify_backlight_support()) {
+ DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ return 0;
+ }
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
@@ -410,7 +419,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
/*
* Update backlight on all connectors that support backlight (usually
@@ -421,7 +430,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
return 0;
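This hunk (like the panel and overlay hunks below) swaps the driver-wide mode_config.mutex for the finer-grained connection_mutex, taken through the drm_modeset_lock helpers. The resulting bracket, straight from the change; a NULL acquire context simply means block until the lock is obtained:

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	/* ... walk connectors and update backlight state ... */
	drm_modeset_unlock(&dev->mode_config.connection_mutex);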
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 129db0c7d835..daa118978eec 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
@@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->active);
@@ -263,7 +263,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
@@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -363,7 +363,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
@@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -688,7 +688,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
BUG_ON(!overlay);
ret = intel_overlay_release_old_vid(overlay);
@@ -793,7 +793,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cb8cfb7e0974..628cd8938274 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -42,6 +42,59 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Returns the reduced downclock mode found in the EDID for LVDS/eDP,
+ * or NULL when no suitable lower-clock mode is available.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *scan, *tmp_mode;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ tmp_mode = NULL;
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a mode has the same resolution as the fixed-panel mode but
+ * a different refresh rate, a reduced downclock has been found.
+ * In that case we can program different FPx0/1 values to
+ * dynamically select between the low and high frequency.
+ */
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * A downclock has already been found, but
+ * keep looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ tmp_mode = scan;
+ }
+ }
+ }
+
+ if (temp_downclock < fixed_mode->clock)
+ return drm_mode_duplicate(dev, tmp_mode);
+ else
+ return NULL;
+}
+
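A hedged usage sketch for the relocated helper: the caller is expected to have filled connector->probed_modes (e.g. from EDID) beforehand, and owns the duplicated mode on success. The call below is illustrative only, not lifted from this diff:

	struct drm_display_mode *downclock =
		intel_find_panel_downclock(dev, fixed_mode, connector);
	if (downclock)
		DRM_DEBUG_KMS("found %d kHz downclock for %d kHz panel\n",
			      downclock->clock, fixed_mode->clock);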
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
@@ -308,21 +361,43 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
- /* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
}
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ switch (i915.panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
+}
+
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -723,9 +798,6 @@ static void i965_enable_backlight(struct intel_connector *connector)
ctl = freq << 16;
I915_WRITE(BLC_PWM_CTL, ctl);
- /* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(connector, panel->backlight.level);
-
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
@@ -734,6 +806,8 @@ static void i965_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_CTL2, ctl2);
POSTING_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
static void vlv_enable_backlight(struct intel_connector *connector)
@@ -795,40 +869,18 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
-enum drm_connector_status
-intel_panel_detect(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
- return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
- connector_status_connected :
- connector_status_disconnected;
- }
-
- switch (i915.panel_ignore_lid) {
- case -2:
- return connector_status_connected;
- case -1:
- return connector_status_disconnected;
- default:
- return connector_status_unknown;
- }
-}
-
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
return 0;
}
@@ -840,9 +892,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
int ret;
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
ret = intel_panel_get_backlight(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
return ret;
@@ -1066,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
int ret;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("native backlight control not available per VBT\n");
- return 0;
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ } else {
+ DRM_DEBUG_KMS("no backlight present per VBT\n");
+ return 0;
+ }
}
/* set level and max in panel struct */
@@ -1077,7 +1133,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
- drm_get_connector_name(connector));
+ connector->name);
return ret;
}
@@ -1103,59 +1159,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
intel_backlight_device_unregister(intel_connector);
}
-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
-struct drm_display_mode *
-intel_find_panel_downclock(struct drm_device *dev,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
-{
- struct drm_display_mode *scan, *tmp_mode;
- int temp_downclock;
-
- temp_downclock = fixed_mode->clock;
- tmp_mode = NULL;
-
- list_for_each_entry(scan, &connector->probed_modes, head) {
- /*
- * If one mode has the same resolution with the fixed_panel
- * mode while they have the different refresh rate, it means
- * that the reduced downclock is found. In such
- * case we can set the different FPx0/1 to dynamically select
- * between low and high frequency.
- */
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- tmp_mode = scan;
- }
- }
- }
-
- if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(dev, tmp_mode);
- else
- return NULL;
-}
-
/* Set up chip specific backlight functions */
void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d93dcf683e8c..ee72807069e4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -487,7 +487,7 @@ void intel_update_fbc(struct drm_device *dev)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
- list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, tmp_crtc) {
if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
@@ -511,8 +511,7 @@ void intel_update_fbc(struct drm_device *dev)
obj = intel_fb->obj;
adjusted_mode = &intel_crtc->config.adjusted_mode;
- if (i915.enable_fbc < 0 &&
- INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+ if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
@@ -1010,7 +1009,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
struct drm_crtc *crtc, *enabled = NULL;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for_each_crtc(dev, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -1831,6 +1830,40 @@ static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
return 512;
}
+static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
+ int level, bool is_sprite)
+{
+ if (INTEL_INFO(dev)->gen >= 8)
+ /* BDW primary/sprite plane watermarks */
+ return level == 0 ? 255 : 2047;
+ else if (INTEL_INFO(dev)->gen >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ return level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ return level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ return level == 0 ? 63 : 255;
+}
+
+static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
+ int level)
+{
+ if (INTEL_INFO(dev)->gen >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen >= 8)
+ return 31;
+ else
+ return 15;
+}
+
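The three *_reg_max helpers split the raw register limits out of the policy code so that ilk_compute_wm_reg_maximums(), added later in this hunk, can reuse them. Spot-checking the branches above on an assumed IVB (gen 7) part:

	int pri_lp0 = ilk_plane_wm_reg_max(dev, 0, false);	/* 127 */
	int spr_lp2 = ilk_plane_wm_reg_max(dev, 2, true);	/* 1023 */
	int cur_lp2 = ilk_cursor_wm_reg_max(dev, 2);		/* 255 */
	int fbc_max = ilk_fbc_wm_reg_max(dev);			/* 15 */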
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
int level,
@@ -1839,7 +1872,6 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
bool is_sprite)
{
unsigned int fifo_size = ilk_display_fifo_size(dev);
- unsigned int max;
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -1870,19 +1902,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- if (INTEL_INFO(dev)->gen >= 8)
- max = level == 0 ? 255 : 2047;
- else if (INTEL_INFO(dev)->gen >= 7)
- /* IVB/HSW primary/sprite plane watermarks */
- max = level == 0 ? 127 : 1023;
- else if (!is_sprite)
- /* ILK/SNB primary plane watermarks */
- max = level == 0 ? 127 : 511;
- else
- /* ILK/SNB sprite plane watermarks */
- max = level == 0 ? 63 : 255;
-
- return min(fifo_size, max);
+ return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
@@ -1895,20 +1915,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- if (INTEL_INFO(dev)->gen >= 7)
- return level == 0 ? 63 : 255;
- else
- return level == 0 ? 31 : 63;
-}
-
-/* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
-{
- /* max that registers can hold */
- if (INTEL_INFO(dev)->gen >= 8)
- return 31;
- else
- return 15;
+ return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1920,7 +1927,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_max(dev);
+ max->fbc = ilk_fbc_wm_reg_max(dev);
+}
+
+static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+ int level,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_reg_max(dev, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
@@ -2059,7 +2076,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
wm[3] *= 2;
}
-static int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2155,38 +2172,52 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
- struct ilk_pipe_wm_parameters *p,
- struct intel_wm_config *config)
+ struct ilk_pipe_wm_parameters *p)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- p->active = intel_crtc_active(crtc);
- if (p->active) {
- p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
- p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
- p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
- p->cur.bytes_per_pixel = 4;
- p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
- p->cur.horiz_pixels = intel_crtc->cursor_width;
- /* TODO: for now, assume primary and cursor planes are always enabled. */
- p->pri.enabled = true;
- p->cur.enabled = true;
- }
+ if (!intel_crtc_active(crtc))
+ return;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- config->num_pipes_active += intel_crtc_active(crtc);
+ p->active = true;
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+ p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
+ p->cur.bytes_per_pixel = 4;
+ p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->cur.horiz_pixels = intel_crtc->cursor_width;
+ /* TODO: for now, assume primary and cursor planes are always enabled. */
+ p->pri.enabled = true;
+ p->cur.enabled = true;
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
+ if (intel_plane->pipe == pipe) {
p->spr = intel_plane->wm;
+ break;
+ }
+ }
+}
- config->sprites_enabled |= intel_plane->wm.enabled;
- config->sprites_scaled |= intel_plane->wm.scaled;
+static void ilk_compute_wm_config(struct drm_device *dev,
+ struct intel_wm_config *config)
+{
+ struct intel_crtc *intel_crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(dev, intel_crtc) {
+ const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
}
}
@@ -2206,8 +2237,9 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
};
struct ilk_wm_maximums max;
- /* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+ pipe_wm->pipe_enabled = params->active;
+ pipe_wm->sprites_enabled = params->spr.enabled;
+ pipe_wm->sprites_scaled = params->spr.scaled;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
@@ -2217,15 +2249,37 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
if (params->spr.scaled)
max_level = 0;
- for (level = 0; level <= max_level; level++)
- ilk_compute_wm_level(dev_priv, level, params,
- &pipe_wm->wm[level]);
+ ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
/* At least LP0 must be valid */
- return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+ return false;
+
+ ilk_compute_wm_reg_maximums(dev, 1, &max);
+
+ for (level = 1; level <= max_level; level++) {
+ struct intel_wm_level wm = {};
+
+ ilk_compute_wm_level(dev_priv, level, params, &wm);
+
+ /*
+ * Disable any watermark level that exceeds the
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+ if (!ilk_validate_wm_level(level, &max, &wm))
+ break;
+
+ pipe_wm->wm[level] = wm;
+ }
+
+ return true;
}
/*
@@ -2237,20 +2291,28 @@ static void ilk_merge_wm_level(struct drm_device *dev,
{
const struct intel_crtc *intel_crtc;
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
- const struct intel_wm_level *wm =
- &intel_crtc->wm.active.wm[level];
+ ret_wm->enable = true;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ const struct intel_pipe_wm *active = &intel_crtc->wm.active;
+ const struct intel_wm_level *wm = &active->wm[level];
+
+ if (!active->pipe_enabled)
+ continue;
+ /*
+ * The watermark values may have been used in the past,
+ * so we must maintain them in the registers for some
+ * time even if the level is now disabled.
+ */
if (!wm->enable)
- return;
+ ret_wm->enable = false;
ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
}
-
- ret_wm->enable = true;
}
/*
@@ -2262,6 +2324,7 @@ static void ilk_wm_merge(struct drm_device *dev,
struct intel_pipe_wm *merged)
{
int level, max_level = ilk_wm_max_level(dev);
+ int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
@@ -2277,15 +2340,19 @@ static void ilk_wm_merge(struct drm_device *dev,
ilk_merge_wm_level(dev, level, wm);
- if (!ilk_validate_wm_level(level, max, wm))
- break;
+ if (level > last_enabled_level)
+ wm->enable = false;
+ else if (!ilk_validate_wm_level(level, max, wm))
+ /* make sure all following levels get disabled */
+ last_enabled_level = level - 1;
/*
* The spec says it is preferred to disable
* FBC WMs instead of disabling a WM level.
*/
if (wm->fbc_val > max->fbc) {
- merged->fbc_wm_enabled = false;
+ if (wm->enable)
+ merged->fbc_wm_enabled = false;
wm->fbc_val = 0;
}
}
@@ -2340,14 +2407,19 @@ static void ilk_compute_wm_results(struct drm_device *dev,
level = ilk_wm_lp_to_level(wm_lp, merged);
r = &merged->wm[level];
- if (!r->enable)
- break;
- results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+ /*
+ * Maintain the watermark values even if the level is
+ * disabled. Doing otherwise could cause underruns.
+ */
+ results->wm_lp[wm_lp - 1] =
(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
+ if (r->enable)
+ results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+
if (INTEL_INFO(dev)->gen >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
@@ -2355,6 +2427,10 @@ static void ilk_compute_wm_results(struct drm_device *dev,
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT;
+ /*
+ * Always set WM1S_LP_EN when spr_val != 0, even if the
+ * level is disabled. Doing otherwise could cause underruns.
+ */
if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
@@ -2363,7 +2439,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
}
/* LP0 register values */
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ for_each_intel_crtc(dev, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.wm[0];
@@ -2598,7 +2674,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
- ilk_compute_wm_parameters(crtc, &params, &config);
+ ilk_compute_wm_parameters(crtc, &params);
intel_compute_pipe_wm(crtc, &params, &pipe_wm);
@@ -2607,6 +2683,8 @@ static void ilk_update_wm(struct drm_crtc *crtc)
intel_crtc->wm.active = pipe_wm;
+ ilk_compute_wm_config(dev, &config);
+
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
@@ -2673,7 +2751,9 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
- if (intel_crtc_active(crtc)) {
+ active->pipe_enabled = intel_crtc_active(crtc);
+
+ if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
/*
@@ -2706,7 +2786,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ for_each_crtc(dev, crtc)
ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -2714,8 +2794,10 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ if (INTEL_INFO(dev)->gen >= 7) {
+ hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ }
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
@@ -3071,6 +3153,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
mask |= GEN6_PM_RP_UP_EI_EXPIRED;
+ if (IS_GEN8(dev_priv->dev))
+ mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
return ~mask;
}
@@ -3091,7 +3176,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -3124,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Latest VLV doesn't need to force the gfx clock */
+ if (dev->pdev->revision >= 0xd) {
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ return;
+ }
+
/*
* When we are idle. Drop to min voltage state.
*/
@@ -3134,16 +3227,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
/* Mask turbo interrupt so that they will not come in between */
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- /* Bring up the Gfx clock */
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
- I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
- VLV_GFX_CLK_FORCE_ON_BIT);
-
- if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
- I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
- DRM_ERROR("GFX_CLK_ON request timed out\n");
- return;
- }
+ vlv_force_gfx_clock(dev_priv, true);
dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
@@ -3154,10 +3238,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
& GENFREQSTATUS) == 0, 5))
DRM_ERROR("timed out waiting for Punit\n");
- /* Release the Gfx clock */
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
- I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
- ~VLV_GFX_CLK_FORCE_ON_BIT);
+ vlv_force_gfx_clock(dev_priv, false);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
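vlv_force_gfx_clock() itself is defined outside this diff; a sketch of what it presumably wraps, reconstructed purely from the open-coded sequence removed above (error handling simplified):

static int vlv_force_gfx_clock_sketch(struct drm_i915_private *dev_priv,
				      bool force_on)
{
	u32 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);

	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	else
		val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (force_on &&
	    wait_for((I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
		      VLV_GFX_CLK_STATUS_BIT) != 0, 5))
		return -ETIMEDOUT;

	return 0;
}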
@@ -3215,6 +3296,26 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
+static void gen8_disable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+ ~dev_priv->pm_rps_events);
+ /* Masking PM interrupts completely here cannot race with the RPS work
+ * item unmasking them again, because the work item uses a different
+ * register (GEN8_GT_IMR(2)) for its masking. The only risk is leaving
+ * stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2), which
+ * gen8_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+}
+
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3241,7 +3342,10 @@ static void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- gen6_disable_rps_interrupts(dev);
+ if (IS_BROADWELL(dev))
+ gen8_disable_rps_interrupts(dev);
+ else
+ gen6_disable_rps_interrupts(dev);
}
static void valleyview_disable_rps(struct drm_device *dev)
@@ -3255,21 +3359,44 @@ static void valleyview_disable_rps(struct drm_device *dev)
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
+ if (IS_VALLEYVIEW(dev)) {
+ if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
+ mode = GEN6_RC_CTL_RC6_ENABLE;
+ else
+ mode = 0;
+ }
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
-int intel_enable_rc6(const struct drm_device *dev)
+static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
/* No RC6 before Ironlake */
if (INTEL_INFO(dev)->gen < 5)
return 0;
+ /* RC6 is only on Ironlake mobile not on desktop */
+ if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+ return 0;
+
/* Respect the kernel parameter if it is set */
- if (i915.enable_rc6 >= 0)
- return i915.enable_rc6;
+ if (enable_rc6 >= 0) {
+ int mask;
+
+ if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+ mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
+ INTEL_RC6pp_ENABLE;
+ else
+ mask = INTEL_RC6_ENABLE;
+
+ if ((enable_rc6 & mask) != enable_rc6)
+ DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+ enable_rc6 & mask, enable_rc6, mask);
+
+ return enable_rc6 & mask;
+ }
/* Disable RC6 on Ironlake */
if (INTEL_INFO(dev)->gen == 5)
@@ -3281,6 +3408,22 @@ int intel_enable_rc6(const struct drm_device *dev)
return INTEL_RC6_ENABLE;
}
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ return i915.enable_rc6;
+}
+
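A worked example of the new sanitization, assuming the usual bit values INTEL_RC6_ENABLE = 1, INTEL_RC6p_ENABLE = 2 and INTEL_RC6pp_ENABLE = 4: booting a Haswell with i915.enable_rc6=7 gives mask = INTEL_RC6_ENABLE = 1, so 7 & 1 = 1, the deep RC6p/RC6pp states are dropped, and "Adjusting RC6 mask to 1 (requested 7, valid 1)" is logged. On SNB or IVB the same request passes through unchanged, since all three states are valid there.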
+static void gen8_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3292,10 +3435,31 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
+static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
+{
+ /* All of these values are in units of 50MHz */
+ dev_priv->rps.cur_freq = 0;
+ /* static values from HW: RP0 (max) > RP1 > RPn (min_freq) */
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ /* XXX: only BYT has a special efficient freq */
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ /* hw_max = RP0 until we check for overclocking */
+ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+}
+
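A hypothetical readout to make the field layout concrete: rp_state_cap = 0x00071624 decodes as rp0 = 0x24 (36 * 50 = 1800 MHz), rp1 = 0x16 (22 * 50 = 1100 MHz) and min_freq = 0x07 (7 * 50 = 350 MHz); efficient_freq is then aliased to rp1 and max_freq to rp0 until the overclocking check says otherwise.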
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
uint32_t rc6_mask = 0, rp_state_cap;
int unused;
@@ -3310,6 +3474,7 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ parse_rp_state_cap(dev_priv, rp_state_cap);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3329,8 +3494,10 @@ static void gen8_enable_rps(struct drm_device *dev)
rc6_mask);
/* 4 Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
- I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(dev_priv->rps.rp1_freq));
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
@@ -3359,7 +3526,7 @@ static void gen8_enable_rps(struct drm_device *dev)
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
- gen6_enable_rps_interrupts(dev);
+ gen8_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -3367,7 +3534,7 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
@@ -3396,23 +3563,7 @@ static void gen6_enable_rps(struct drm_device *dev)
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- /* All of these values are in units of 50MHz */
- dev_priv->rps.cur_freq = 0;
- /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
- /* XXX: only BYT has a special efficient freq */
- dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- /* hw_max = RP0 until we check for overclocking */
- dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
-
- /* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_freq_softlimit == 0)
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+ parse_rp_state_cap(dev_priv, rp_state_cap);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3494,7 +3645,7 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
-void gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
@@ -3564,6 +3715,18 @@ void gen6_update_ring_freq(struct drm_device *dev)
}
}
+void gen6_update_ring_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ __gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
@@ -3658,10 +3821,49 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
dev_priv->vlv_pctx = NULL;
}
+static void valleyview_init_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ valleyview_setup_pctx(dev);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
+
+ dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+{
+ valleyview_cleanup_pctx(dev);
+}
+
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
@@ -3724,29 +3926,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
- dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
- dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
- dev_priv->rps.max_freq);
-
- dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
-
- dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- dev_priv->rps.min_freq);
-
- /* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_freq_softlimit == 0)
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
@@ -3815,7 +3994,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;
@@ -3873,7 +4052,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
+ intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4327,7 +4506,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
bool ret = false;
int i;
@@ -4487,14 +4666,16 @@ static void intel_init_emon(struct drm_device *dev)
void intel_init_gt_powersave(struct drm_device *dev)
{
+ i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
if (IS_VALLEYVIEW(dev))
- valleyview_setup_pctx(dev);
+ valleyview_init_gt_powersave(dev);
}
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
if (IS_VALLEYVIEW(dev))
- valleyview_cleanup_pctx(dev);
+ valleyview_cleanup_gt_powersave(dev);
}
void intel_disable_gt_powersave(struct drm_device *dev)
@@ -4507,8 +4688,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+ } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
+ intel_runtime_pm_put(dev_priv);
+
cancel_work_sync(&dev_priv->rps.work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
@@ -4533,13 +4716,15 @@ static void intel_gen6_powersave_work(struct work_struct *work)
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
- gen6_update_ring_freq(dev);
+ __gen6_update_ring_freq(dev);
} else {
gen6_enable_rps(dev);
- gen6_update_ring_freq(dev);
+ __gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_runtime_pm_put(dev_priv);
}
void intel_enable_gt_powersave(struct drm_device *dev)
@@ -4547,20 +4732,38 @@ void intel_enable_gt_powersave(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_IRONLAKE_M(dev)) {
+ mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
- } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ mutex_unlock(&dev->struct_mutex);
+ } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
+ *
+ * We depend on the HW RC6 power context save/restore
+ * mechanism when entering D3 through runtime PM suspend. So
+ * disable RPM until RPS/RC6 is properly setup. We can only
+ * get here via the driver load/system resume/runtime resume
+ * paths, so the _noresume version is enough (and in case of
+ * runtime resume it's necessary).
*/
- schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
- round_jiffies_up_relative(HZ));
+ if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ)))
+ intel_runtime_pm_get_noresume(dev_priv);
}
}
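The runtime-PM reference taken here is dropped in exactly one of two places: intel_gen6_powersave_work() puts it once RPS/RC6 setup completes (the intel_runtime_pm_put() added earlier in this diff), and intel_disable_gt_powersave() puts it instead when it manages to cancel the still-pending work. The invariant, as a sketch:

	/*
	 *   schedule_delayed_work() succeeds -> intel_runtime_pm_get_noresume()
	 *   work item runs                   -> intel_runtime_pm_put()  (powersave work)
	 *   work item cancelled              -> intel_runtime_pm_put()  (disable path)
	 * Exactly one put per successful get, never both.
	 */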
+void intel_reset_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->rps.enabled = false;
+ intel_enable_gt_powersave(dev);
+}
+
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4666,6 +4869,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+ /* WaDisable_RenderCache_OperationalFlush:ilk */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
g4x_disable_trickle_feed(dev);
ibx_init_clock_gating(dev);
@@ -4741,6 +4947,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
@@ -4909,6 +5118,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
+ /* WaDisableDopClockGating:bdw (may not be needed for production) */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -4980,6 +5193,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
+ /* WaDisable_RenderCache_OperationalFlush:hsw */
+ I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
/* enable HiZ Raw Stall Optimization */
I915_WRITE(CACHE_MODE_0_GEN7,
_MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
@@ -5032,6 +5248,9 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ /* WaDisable_RenderCache_OperationalFlush:ivb */
+ I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -5126,6 +5345,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
+ dev_priv->vlv_cdclk_freq);
+
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:vlv */
@@ -5143,6 +5366,9 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ /* WaDisable_RenderCache_OperationalFlush:vlv */
+ I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -5165,8 +5391,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
- /* WaDisableL3Bank2xClockGate:vlv */
- I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+ /* WaDisableL3Bank2xClockGate:vlv
+ * Disabling L3 clock gating - MMIO 940c[25] = 1.
+ * Set bit 25 to disable L3_BANK_2x_CLK_GATING. */
+ I915_WRITE(GEN7_UCGCTL4,
+ I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
@@ -5191,6 +5420,59 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
+static void cherryview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+ /* WaDisablePartialInstShootdown:chv */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ I915_WRITE(GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+ /* WaVSRefCountFullforceMissDisable:chv */
+ /* WaDSRefCountFullforceMissDisable:chv */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) &
+ ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+ /* WaDisableSemaphoreAndSyncFlipWait:chv */
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+ /* WaDisableCSUnitClockGating:chv */
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSDEUnitClockGating:chv */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+
+ /* WaDisableGunitClockGating:chv (pre-production hw) */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
+ GINT_DIS);
+
+ /* WaDisableFfDopClockGating:chv (pre-production hw) */
+ I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
+
+ /* WaDisableDopClockGating:chv (pre-production hw) */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+}
+
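All of the _MASKED_BIT_* writes in these clock-gating routines target registers whose upper 16 bits act as a per-bit write-enable mask, so single bits can be flipped without a read-modify-write. The macros, mirroring their i915_reg.h definitions:

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)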
static void g4x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5212,6 +5494,9 @@ static void g4x_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+ /* WaDisable_RenderCache_OperationalFlush:g4x */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
g4x_disable_trickle_feed(dev);
}
@@ -5226,6 +5511,9 @@ static void crestline_init_clock_gating(struct drm_device *dev)
I915_WRITE16(DEUC, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:gen4 */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
@@ -5240,6 +5528,9 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+ /* WaDisable_RenderCache_OperationalFlush:gen4 */
+ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
@@ -5256,6 +5547,12 @@ static void gen3_init_clock_gating(struct drm_device *dev)
/* IIR "flip pending" means done if this bit is set */
I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+ /* interrupts should cause a wake up from C3 */
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
@@ -5263,6 +5560,10 @@ static void i85x_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+
+ /* interrupts should cause a wake up from C3 */
+ I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+ _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
}
static void i830_init_clock_gating(struct drm_device *dev)
@@ -5310,18 +5611,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
-
- power_domains = &dev_priv->power_domains;
-
- return power_domains->domain_use_count[domain];
-}
-
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
@@ -5335,21 +5626,34 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
- mutex_lock(&power_domains->lock);
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
- if (!power_well->ops->is_enabled(dev_priv, power_well)) {
+ if (!power_well->hw_enabled) {
is_enabled = false;
break;
}
}
- mutex_unlock(&power_domains->lock);
return is_enabled;
}
+bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = intel_display_power_enabled_unlocked(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
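This is the usual locked/unlocked split: the _unlocked variant does the actual walk and may be called by code that already holds power_domains->lock, while the public entry point just wraps it in the mutex. A minimal hypothetical caller (the function name and the domain are illustrative only):

	static bool example_domain_on(struct drm_i915_private *dev_priv)
	{
		bool on;

		mutex_lock(&dev_priv->power_domains.lock);
		/* ... other work under the same lock ... */
		on = intel_display_power_enabled_unlocked(dev_priv,
							  POWER_DOMAIN_AUDIO);
		mutex_unlock(&dev_priv->power_domains.lock);

		return on;
	}

Note that the walk itself now tests the cached power_well->hw_enabled flag instead of calling into ->is_enabled(), which is what makes the unlocked variant cheap.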
+
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
@@ -5392,33 +5696,6 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
}
}
-static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
-{
- assert_spin_locked(&dev->vbl_lock);
-
- dev->vblank[pipe].last = 0;
-}
-
-static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- enum pipe pipe;
- unsigned long irqflags;
-
- /*
- * After this, the registers on the pipes that are part of the power
- * well will become zero, so we have to adjust our counters according to
- * that.
- *
- * FIXME: Should we do this in general in drm_vblank_post_modeset?
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for_each_pipe(pipe)
- if (pipe != PIPE_A)
- reset_vblank_counter(dev, pipe);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@@ -5447,8 +5724,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
-
- hsw_power_well_post_disable(dev_priv);
}
}
}
@@ -5489,13 +5764,34 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
return true;
}
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
+void __vlv_set_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id, bool enable)
{
- enum punit_power_well power_well_id = power_well->data;
+ struct drm_device *dev = dev_priv->dev;
u32 mask;
u32 state;
u32 ctrl;
+ enum pipe pipe;
+
+ if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (enable) {
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ } else {
+ for_each_pipe(pipe)
+ assert_pll_disabled(dev_priv, pipe);
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
+ ~DPIO_CMNRST);
+ }
+ }
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5523,6 +5819,28 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
out:
mutex_unlock(&dev_priv->rps.hw_lock);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+
+ __vlv_set_power_well(dev_priv, power_well_id, enable);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5591,11 +5909,13 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
/*
- * During driver initialization we need to defer enabling hotplug
- * processing until fbdev is set up.
+	 * During driver initialization/resume we can avoid restoring the
+	 * part of the HW/SW state that will be explicitly re-initialized anyway.
*/
- if (dev_priv->enable_hotplug_processing)
- intel_hpd_init(dev_priv->dev);
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv->dev);
i915_redisable_vga_power_on(dev_priv->dev);
}
@@ -5603,23 +5923,12 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_device *dev = dev_priv->dev;
- enum pipe pipe;
-
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_pipe(pipe)
- __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
-
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- spin_lock_irq(&dev->vbl_lock);
- for_each_pipe(pipe)
- reset_vblank_counter(dev, pipe);
- spin_unlock_irq(&dev->vbl_lock);
-
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -5663,6 +5972,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
}
check_power_well_state(dev_priv, power_well);
@@ -5692,6 +6002,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
@@ -5706,33 +6017,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
static struct i915_power_domains *hsw_pwr;
/* Display audio driver power well request */
-void i915_request_power_well(void)
+int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
- if (WARN_ON(!hsw_pwr))
- return;
+ if (!hsw_pwr)
+ return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
-void i915_release_power_well(void)
+int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
- if (WARN_ON(!hsw_pwr))
- return;
+ if (!hsw_pwr)
+ return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
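Since the request/release entry points now return an error instead of WARNing, an external user (the HDA audio driver is the intended one) can fall back gracefully when i915 isn't bound. A hedged sketch of the expected call sequence; the wrapper name is hypothetical, the three symbols are the ones exported above:

	/* Hypothetical audio-side usage of the i915 power well interface. */
	static int example_hda_bind_display(void)
	{
		int cdclk, ret;

		ret = i915_request_power_well();	/* grabs POWER_DOMAIN_AUDIO */
		if (ret < 0)
			return ret;			/* -ENODEV: i915 not ready */

		cdclk = i915_get_cdclk_freq();		/* valid only while the well is held */
		if (cdclk < 0) {
			i915_release_power_well();
			return cdclk;
		}

		/* ... program display audio using cdclk (kHz) ... */

		return i915_release_power_well();
	}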
+
+
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
@@ -5867,12 +6201,6 @@ static struct i915_power_well vlv_power_wells[] = {
.ops = &vlv_display_power_well_ops,
},
{
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_power_well_ops,
- },
- {
.name = "dpio-tx-b-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
@@ -5908,6 +6236,12 @@ static struct i915_power_well vlv_power_wells[] = {
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_power_well_ops,
+ },
};
#define set_power_wells(power_domains, __power_wells) ({ \
@@ -5952,16 +6286,23 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
int i;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
mutex_unlock(&power_domains->lock);
}
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ power_domains->initializing = true;
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
+ power_domains->initializing = false;
}
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
@@ -5986,6 +6327,18 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+ pm_runtime_get_noresume(device);
+}
+
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -6008,6 +6361,15 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
pm_runtime_set_active(device);
+ /*
+	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+ * requirement.
+ */
+ if (!intel_enable_rc6(dev)) {
+ DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+ return;
+ }
+
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
pm_runtime_mark_last_busy(device);
pm_runtime_use_autosuspend(device);
@@ -6023,6 +6385,9 @@ void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
if (!HAS_RUNTIME_PM(dev))
return;
+ if (!intel_enable_rc6(dev))
+ return;
+
/* Make sure we're not suspended first. */
pm_runtime_get_sync(device);
pm_runtime_disable(device);
@@ -6087,6 +6452,10 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
+ } else if (IS_CHERRYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
new file mode 100644
index 000000000000..a5e783a9928a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_RENDERSTATE_H
+#define _INTEL_RENDERSTATE_H
+
+#include <linux/types.h>
+
+struct intel_renderstate_rodata {
+ const u32 *reloc;
+ const u32 reloc_items;
+ const u32 *batch;
+ const u32 batch_items;
+};
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+
+#define RO_RENDERSTATE(_g) \
+ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+ .reloc = gen ## _g ## _null_state_relocs, \
+ .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
+ .batch = gen ## _g ## _null_state_batch, \
+ .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+ }
+
+#endif /* _INTEL_RENDERSTATE_H */
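For reference, the macro is purely mechanical; RO_RENDERSTATE(6) in the per-gen table file below expands to roughly:

	const struct intel_renderstate_rodata gen6_null_state = {
		.reloc		= gen6_null_state_relocs,
		.reloc_items	= sizeof(gen6_null_state_relocs)/4,	/* dword count */
		.batch		= gen6_null_state_batch,
		.batch_items	= sizeof(gen6_null_state_batch)/4,	/* dword count */
	};

so each intel_renderstate_genN.c only has to supply the two static u32 arrays plus one RO_RENDERSTATE(N); line.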
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
new file mode 100644
index 000000000000..740538ad0977
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -0,0 +1,289 @@
+#include "intel_renderstate.h"
+
+static const u32 gen6_null_state_relocs[] = {
+ 0x00000020,
+ 0x00000024,
+ 0x0000002c,
+ 0x000001e0,
+ 0x000001e4,
+};
+
+static const u32 gen6_null_state_batch[] = {
+ 0x69040000,
+ 0x790d0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x61020000,
+ 0x00000000,
+ 0x78050001,
+ 0x00000018,
+ 0x00000000,
+ 0x780d1002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000420,
+ 0x78150003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79100000,
+ 0x00000000,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x780e0002,
+ 0x00000441,
+ 0x00000401,
+ 0x00000401,
+ 0x78021002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000400,
+ 0x78130012,
+ 0x00400810,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140007,
+ 0x00000280,
+ 0x08080000,
+ 0x00000000,
+ 0x00060000,
+ 0x4e080002,
+ 0x00100400,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11330000,
+ 0x02850004,
+ 0x11220000,
+ 0x78011002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000200,
+ 0x78080003,
+ 0x00002000,
+ 0x00000448, /* reloc */
+ 0x00000448, /* reloc */
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000220, /* state start */
+ 0x00000240,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x204077be,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x206077be,
+ 0x000000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x208077be,
+ 0x000000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x20a077be,
+ 0x000000d0,
+ 0x008d0080,
+ 0x00000201,
+ 0x20080061,
+ 0x00000000,
+ 0x00000000,
+ 0x00600001,
+ 0x20200022,
+ 0x008d0000,
+ 0x00000000,
+ 0x02800031,
+ 0x21c01cc9,
+ 0x00000020,
+ 0x0a8a0001,
+ 0x00600001,
+ 0x204003be,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00600001,
+ 0x206003be,
+ 0x008d01e0,
+ 0x00000000,
+ 0x00600001,
+ 0x208003be,
+ 0x008d0200,
+ 0x00000000,
+ 0x00600001,
+ 0x20a003be,
+ 0x008d0220,
+ 0x00000000,
+ 0x00600001,
+ 0x20c003be,
+ 0x008d0240,
+ 0x00000000,
+ 0x00600001,
+ 0x20e003be,
+ 0x008d0260,
+ 0x00000000,
+ 0x00600001,
+ 0x210003be,
+ 0x008d0280,
+ 0x00000000,
+ 0x00600001,
+ 0x212003be,
+ 0x008d02a0,
+ 0x00000000,
+ 0x05800031,
+ 0x24001cc8,
+ 0x00000040,
+ 0x90019000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000124,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80000031,
+ 0x00000003,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(6);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
new file mode 100644
index 000000000000..6fa7ff2a1298
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -0,0 +1,253 @@
+#include "intel_renderstate.h"
+
+static const u32 gen7_null_state_relocs[] = {
+ 0x0000000c,
+ 0x00000010,
+ 0x00000018,
+ 0x000001ec,
+};
+
+static const u32 gen7_null_state_batch[] = {
+ 0x69040000,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x790d0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x79160000,
+ 0x00000008,
+ 0x78300000,
+ 0x02010040,
+ 0x78310000,
+ 0x04000000,
+ 0x78320000,
+ 0x04000000,
+ 0x78330000,
+ 0x02000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78210000,
+ 0x00000000,
+ 0x78130005,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140001,
+ 0x20000800,
+ 0x00000000,
+ 0x781e0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x78240000,
+ 0x00000240,
+ 0x78230000,
+ 0x00000260,
+ 0x782f0000,
+ 0x00000280,
+ 0x781f000c,
+ 0x00400810,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78200006,
+ 0x000002c0,
+ 0x08080000,
+ 0x00000000,
+ 0x28000402,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02f60004,
+ 0x11230000,
+ 0x78080003,
+ 0x00006008,
+ 0x00000340, /* reloc */
+ 0xffffffff,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000360,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x0000000f,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000031, /* state start */
+ 0x00000003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000492,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0080005a,
+ 0x2e2077bd,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0080005a,
+ 0x2e6077bd,
+ 0x000000d0,
+ 0x008d0040,
+ 0x02800031,
+ 0x21801fa9,
+ 0x008d0e20,
+ 0x08840001,
+ 0x00800001,
+ 0x2e2003bd,
+ 0x008d0180,
+ 0x00000000,
+ 0x00800001,
+ 0x2e6003bd,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00800001,
+ 0x2ea003bd,
+ 0x008d0200,
+ 0x00000000,
+ 0x00800001,
+ 0x2ee003bd,
+ 0x008d0240,
+ 0x00000000,
+ 0x05800031,
+ 0x20001fa8,
+ 0x008d0e20,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000380,
+ 0x000003a0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(7);
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
new file mode 100644
index 000000000000..5c875615d42a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -0,0 +1,479 @@
+#include "intel_renderstate.h"
+
+static const u32 gen8_null_state_relocs[] = {
+ 0x00000048,
+ 0x00000050,
+ 0x00000060,
+ 0x000003ec,
+};
+
+static const u32 gen8_null_state_batch[] = {
+ 0x69040000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x6101000e,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0xfffff001,
+ 0x00001001,
+ 0xfffff001,
+ 0x00001001,
+ 0x78230000,
+ 0x000006e0,
+ 0x78210000,
+ 0x00000700,
+ 0x78300000,
+ 0x08010040,
+ 0x78330000,
+ 0x08000000,
+ 0x78310000,
+ 0x08000000,
+ 0x78320000,
+ 0x08000000,
+ 0x78240000,
+ 0x00000641,
+ 0x780e0000,
+ 0x00000601,
+ 0x780d0000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x78260000,
+ 0x00000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781f0002,
+ 0x30400820,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00210000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000480,
+ 0x782f0000,
+ 0x00000540,
+ 0x78140000,
+ 0x00000800,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7820000a,
+ 0x00000580,
+ 0x00000000,
+ 0x08080000,
+ 0x00000000,
+ 0x00000000,
+ 0x1f000002,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x784d0000,
+ 0x40000000,
+ 0x784f0000,
+ 0x80000100,
+ 0x780f0000,
+ 0x00000740,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000001,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x78080003,
+ 0x00006000,
+ 0x000005e0, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02850004,
+ 0x11230000,
+ 0x784b0000,
+ 0x0000000f,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000000,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000004c0, /* state start */
+ 0x00000500,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000092,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x21403ae8,
+ 0x3a0000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x21603ae8,
+ 0x3a0000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x21803ae8,
+ 0x3a0000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x21a03ae8,
+ 0x3a0000d0,
+ 0x008d0080,
+ 0x02800031,
+ 0x2e0022e8,
+ 0x0e000140,
+ 0x08840001,
+ 0x05800031,
+ 0x200022e0,
+ 0x0e000e00,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x06200000,
+ 0x00000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(8);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 79fb4cc2137c..279488addf3f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,26 +33,44 @@
#include "i915_trace.h"
#include "intel_drv.h"
-static inline int ring_space(struct intel_ring_buffer *ring)
+/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+
+static inline int __ring_space(int head, int tail, int size)
{
- int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+ int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
- space += ring->size;
+ space += size;
return space;
}
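To make the wrap-around arithmetic concrete, here is a standalone check of the same math (values picked purely for illustration; I915_RING_FREE_SPACE is the reserved slack defined in intel_ringbuffer.h, 64 bytes at this point):

	#include <stdio.h>

	#define I915_RING_FREE_SPACE 64

	static int __ring_space(int head, int tail, int size)
	{
		int space = head - (tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += size;	/* tail has wrapped past head */
		return space;
	}

	int main(void)
	{
		int size = 32 * 4096;	/* the 32-page ringbuffer used below */

		/* tail ahead of head: free space wraps around the buffer end */
		printf("%d\n", __ring_space(0x100, 0x1f00, size));	/* 123328 */
		/* head ahead of tail: plain difference minus the slack */
		printf("%d\n", __ring_space(0x1f00, 0x100, size));	/* 7616 */
		return 0;
	}

Factoring the computation out of ring_space() is what lets intel_ring_wait_request() further down reuse it against a request's recorded tail rather than the live ring state.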
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+static inline int ring_space(struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+}
+
+static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+}
- ring->tail &= ring->size - 1;
- if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+void __intel_ring_advance(struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ ringbuf->tail &= ringbuf->size - 1;
+ if (intel_ring_stopped(ring))
return;
- ring->write_tail(ring, ring->tail);
+ ring->write_tail(ring, ringbuf->tail);
}
static int
-gen2_render_ring_flush(struct intel_ring_buffer *ring,
+gen2_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -78,7 +96,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen4_render_ring_flush(struct intel_ring_buffer *ring,
+gen4_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -173,9 +191,9 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
- u32 scratch_addr = ring->scratch.gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -208,11 +226,11 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
}
static int
-gen6_render_ring_flush(struct intel_ring_buffer *ring,
+gen6_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -260,7 +278,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
int ret;
@@ -278,7 +296,7 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
int ret;
@@ -302,11 +320,11 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
}
static int
-gen7_render_ring_flush(struct intel_ring_buffer *ring,
+gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/*
@@ -363,11 +381,11 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen8_render_ring_flush(struct intel_ring_buffer *ring,
+gen8_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -403,14 +421,14 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
}
-static void ring_write_tail(struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_engine_cs *ring,
u32 value)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u64 acthd;
@@ -426,7 +444,7 @@ u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
return acthd;
}
-static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
@@ -437,7 +455,7 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
I915_WRITE(HWS_PGA, addr);
}
-static bool stop_ring(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -461,11 +479,12 @@ static bool stop_ring(struct intel_ring_buffer *ring)
return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}
-static int init_ring_common(struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = ring->obj;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -504,7 +523,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* register values. */
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
@@ -512,12 +531,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+ ring->name,
+ I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
+ I915_READ_HEAD(ring), I915_READ_TAIL(ring),
+ I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
ret = -EIO;
goto out;
}
@@ -525,10 +543,10 @@ static int init_ring_common(struct intel_ring_buffer *ring)
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
i915_kernel_lost_context(ring->dev);
else {
- ring->head = I915_READ_HEAD(ring);
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring_space(ring);
- ring->last_retired_head = -1;
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = ring_space(ring);
+ ringbuf->last_retired_head = -1;
}
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -540,7 +558,7 @@ out:
}
static int
-init_pipe_control(struct intel_ring_buffer *ring)
+init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
@@ -581,7 +599,7 @@ err:
return ret;
}
-static int init_render_ring(struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -595,19 +613,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
+ /* WaEnableFlushTlbInvalidationMode:snb */
if (INTEL_INFO(dev)->gen == 6)
I915_WRITE(GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
+ /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
- _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
@@ -624,13 +644,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
- /* This is not explicitly set for GEN6, so read the register.
- * see intel_ring_mi_set_context() for why we care.
- * TODO: consider explicitly setting the bit for GEN5
- */
- ring->itlb_before_ctx_switch =
- !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
}
if (INTEL_INFO(dev)->gen >= 6)
@@ -642,7 +655,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-static void render_ring_cleanup(struct intel_ring_buffer *ring)
+static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -658,20 +671,46 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
ring->scratch.obj = NULL;
}
-static void
-update_mboxes(struct intel_ring_buffer *ring,
- u32 mmio_offset)
+static int gen6_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
{
-/* NB: In order to be able to do semaphore MBOX updates for varying number
- * of rings, it's easiest if we round up each individual update to a
- * multiple of 2 (since ring updates must always be a multiple of 2)
- * even though the actual update only requires 3 dwords.
- */
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *useless;
+ int i, ret;
+
+ /* NB: In order to be able to do semaphore MBOX updates for varying
+ * number of rings, it's easiest if we round up each individual update
+ * to a multiple of 2 (since ring updates must always be a multiple of
+ * 2) even though the actual update only requires 3 dwords.
+ */
#define MBOX_UPDATE_DWORDS 4
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, mmio_offset);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
- intel_ring_emit(ring, MI_NOOP);
+ if (i915_semaphore_is_enabled(dev))
+ num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+ else
+ return intel_ring_begin(signaller, num_dwords);
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+#undef MBOX_UPDATE_DWORDS
+
+ for_each_ring(useless, dev_priv, i) {
+ u32 mbox_reg = signaller->semaphore.mbox.signal[i];
+ if (mbox_reg != GEN6_NOSYNC) {
+ intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(signaller, mbox_reg);
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, MI_NOOP);
+ } else {
+ intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(signaller, MI_NOOP);
+ }
+ }
+
+ return 0;
}
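The dword accounting here is subtle: gen6_signal() receives the caller's own dword count and grows it by one mailbox update per other ring, so a single intel_ring_begin() covers both the signals and the caller's payload. A back-of-the-envelope check, assuming I915_NUM_RINGS is 5 in this series (RCS, VCS, BCS, VEBOX plus the newly added VCS2):

	#define I915_NUM_RINGS		5	/* assumption, per the VCS2 addition */
	#define MBOX_UPDATE_DWORDS	4	/* LRI + reg + seqno, padded to 4 */

	/* One gen6_add_request() with semaphores enabled reserves:
	 *   4 dwords for the request itself
	 * + 4 dwords per ring being signalled
	 * = 4 + (5 - 1) * 4 = 20 dwords in a single intel_ring_begin().
	 */

This also explains the MI_NOOP padding in the GEN6_NOSYNC branch: every ring slot must consume exactly MBOX_UPDATE_DWORDS so the reservation stays exact.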
/**
@@ -684,29 +723,14 @@ update_mboxes(struct intel_ring_buffer *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring)
+gen6_add_request(struct intel_engine_cs *ring)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *useless;
- int i, ret, num_dwords = 4;
-
- if (i915_semaphore_is_enabled(dev))
- num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
-#undef MBOX_UPDATE_DWORDS
+ int ret;
- ret = intel_ring_begin(ring, num_dwords);
+ ret = ring->semaphore.signal(ring, 4);
if (ret)
return ret;
- if (i915_semaphore_is_enabled(dev)) {
- for_each_ring(useless, dev_priv, i) {
- u32 mbox_reg = ring->signal_mbox[i];
- if (mbox_reg != GEN6_NOSYNC)
- update_mboxes(ring, mbox_reg);
- }
- }
-
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
@@ -731,14 +755,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
* @seqno - seqno which the waiter will block on
*/
static int
-gen6_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
+gen6_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
u32 seqno)
{
- int ret;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
+ u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+ int ret;
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
@@ -746,8 +771,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
*/
seqno -= 1;
- WARN_ON(signaller->semaphore_register[waiter->id] ==
- MI_SEMAPHORE_SYNC_INVALID);
+ WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
ret = intel_ring_begin(waiter, 4);
if (ret)
@@ -755,9 +779,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
/* If seqno wrap happened, omit the wait with no-ops */
if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
- intel_ring_emit(waiter,
- dw1 |
- signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, dw1 | wait_mbox);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
@@ -782,9 +804,9 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring)
+pc_render_add_request(struct intel_engine_cs *ring)
{
- u32 scratch_addr = ring->scratch.gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -806,15 +828,15 @@ pc_render_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
+ scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
PIPE_CONTROL_FLUSH(ring, scratch_addr);
- scratch_addr += 128;
+ scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
- scratch_addr += 128;
+ scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
- scratch_addr += 128;
+ scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
- scratch_addr += 128;
+ scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
@@ -830,7 +852,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
@@ -844,31 +866,31 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static void
-ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return ring->scratch.cpu_page[0];
}
static void
-pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
ring->scratch.cpu_page[0] = seqno;
}
static bool
-gen5_ring_get_irq(struct intel_ring_buffer *ring)
+gen5_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -886,7 +908,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-gen5_ring_put_irq(struct intel_ring_buffer *ring)
+gen5_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -899,7 +921,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
}
static bool
-i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -920,7 +942,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -936,7 +958,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
}
static bool
-i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -957,7 +979,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -972,7 +994,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -989,6 +1011,11 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
+ /*
+	 * VCS2 doesn't actually exist on Gen7; this case is here only
+	 * to silence the gcc switch check warning.
+ */
+ case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
@@ -1030,7 +1057,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
}
static int
-bsd_ring_flush(struct intel_ring_buffer *ring,
+bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1047,7 +1074,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring)
+i9xx_add_request(struct intel_engine_cs *ring)
{
int ret;
@@ -1065,7 +1092,7 @@ i9xx_add_request(struct intel_ring_buffer *ring)
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring)
+gen6_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1090,7 +1117,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1108,7 +1135,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static bool
-hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1128,7 +1155,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
}
static void
-hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1146,7 +1173,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
}
static bool
-gen8_ring_get_irq(struct intel_ring_buffer *ring)
+gen8_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1172,7 +1199,7 @@ gen8_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-gen8_ring_put_irq(struct intel_ring_buffer *ring)
+gen8_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1192,8 +1219,8 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 length,
+i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 length,
unsigned flags)
{
int ret;
@@ -1215,8 +1242,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
-i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
+i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
unsigned flags)
{
int ret;
@@ -1266,8 +1293,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
}
static int
-i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
+i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
unsigned flags)
{
int ret;
@@ -1283,7 +1310,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
-static void cleanup_status_page(struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1297,50 +1324,44 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
ring->status_page.obj = NULL;
}
-static int init_status_page(struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_engine_cs *ring)
{
- struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *obj;
- int ret;
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- ret = -ENOMEM;
- goto err;
- }
+ if ((obj = ring->status_page.obj) == NULL) {
+ int ret;
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return -ENOMEM;
+ }
- ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
- if (ret)
- goto err_unref;
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ if (ret) {
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ring->status_page.obj = obj;
+ }
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
- if (ring->status_page.page_addr == NULL) {
- ret = -ENOMEM;
- goto err_unpin;
- }
- ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
return 0;
-
-err_unpin:
- i915_gem_object_ggtt_unpin(obj);
-err_unref:
- drm_gem_object_unreference(&obj->base);
-err:
- return ret;
}
-static int init_phys_status_page(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1357,44 +1378,24 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
return 0;
}
-static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_engine_cs *ring)
{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
- ring->size = 32 * PAGE_SIZE;
- memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-
- init_waitqueue_head(&ring->irq_queue);
-
- if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(ring);
- if (ret)
- return ret;
- } else {
- BUG_ON(ring->id != RCS);
- ret = init_phys_status_page(ring);
- if (ret)
- return ret;
- }
+ if (intel_ring_initialized(ring))
+ return 0;
obj = NULL;
if (!HAS_LLC(dev))
- obj = i915_gem_object_create_stolen(dev, ring->size);
+ obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, ring->size);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- ret = -ENOMEM;
- goto err_hws;
- }
-
- ring->obj = obj;
+ obj = i915_gem_alloc_object(dev, ringbuf->size);
+ if (obj == NULL)
+ return -ENOMEM;
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
@@ -1404,65 +1405,102 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unpin;
- ring->virtual_start =
+ ringbuf->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
- ring->size);
- if (ring->virtual_start == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
+ ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
ret = -EINVAL;
goto err_unpin;
}
- ret = ring->init(ring);
- if (ret)
- goto err_unmap;
+ ringbuf->obj = obj;
+ return 0;
+
+err_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_engine_cs *ring)
+{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int ret;
+
+ if (ringbuf == NULL) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
+ }
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ ringbuf->size = 32 * PAGE_SIZE;
+ memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+
+ init_waitqueue_head(&ring->irq_queue);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(ring);
+ if (ret)
+ goto error;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_status_page(ring);
+ if (ret)
+ goto error;
+ }
+
+ ret = allocate_ring_buffer(ring);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+ goto error;
+ }
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
- ring->effective_size = ring->size;
- if (IS_I830(ring->dev) || IS_845G(ring->dev))
- ring->effective_size -= 128;
+ ringbuf->effective_size = ringbuf->size;
+ if (IS_I830(dev) || IS_845G(dev))
+ ringbuf->effective_size -= 2 * CACHELINE_BYTES;
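Spelled out (assuming the usual 4 KiB pages), the erratum guard leaves

	effective_size = 32 * PAGE_SIZE - 2 * CACHELINE_BYTES
	               = 131072 - 128
	               = 130944 usable bytes

on i830/845g; 2 * CACHELINE_BYTES is the same 128 bytes the old code hard-coded, now expressed via the new macro.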
- i915_cmd_parser_init_ring(ring);
+ ret = i915_cmd_parser_init_ring(ring);
+ if (ret)
+ goto error;
+
+ ret = ring->init(ring);
+ if (ret)
+ goto error;
return 0;
-err_unmap:
- iounmap(ring->virtual_start);
-err_unpin:
- i915_gem_object_ggtt_unpin(obj);
-err_unref:
- drm_gem_object_unreference(&obj->base);
- ring->obj = NULL;
-err_hws:
- cleanup_status_page(ring);
+error:
+ kfree(ringbuf);
+ ring->buffer = NULL;
return ret;
}
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv;
- int ret;
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
- if (ring->obj == NULL)
+ if (!intel_ring_initialized(ring))
return;
- /* Disable the ring buffer. The ring must be idle at this point */
- dev_priv = ring->dev->dev_private;
- ret = intel_ring_idle(ring);
- if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
- DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- ring->name, ret);
-
- I915_WRITE_CTL(ring, 0);
+ intel_stop_ring_buffer(ring);
+ WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
- iounmap(ring->virtual_start);
+ iounmap(ringbuf->virtual_start);
- i915_gem_object_ggtt_unpin(ring->obj);
- drm_gem_object_unreference(&ring->obj->base);
- ring->obj = NULL;
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1470,44 +1508,34 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
ring->cleanup(ring);
cleanup_status_page(ring);
+
+ i915_cmd_parser_fini_ring(ring);
+
+ kfree(ringbuf);
+ ring->buffer = NULL;
}
-static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0, tail;
+ u32 seqno = 0;
int ret;
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
- ring->space = ring_space(ring);
- if (ring->space >= n)
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
- int space;
-
- if (request->tail == -1)
- continue;
-
- space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
- if (space < 0)
- space += ring->size;
- if (space >= n) {
+ if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
seqno = request->seqno;
- tail = request->tail;
break;
}
-
- /* Consume this request in case we need more space than
- * is available and so need to prevent a race between
- * updating last_retired_head and direct reads of
- * I915_RING_HEAD. It also provides a nice sanity check.
- */
- request->tail = -1;
}
if (seqno == 0)
@@ -1517,18 +1545,19 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (ret)
return ret;
- ring->head = tail;
- ring->space = ring_space(ring);
- if (WARN_ON(ring->space < n))
- return -ENOSPC;
+ i915_gem_retire_requests_ring(ring);
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+ ringbuf->space = ring_space(ring);
return 0;
}
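/*
 * Sketch of the wraparound free-space computation that __ring_space()
 * (used above) performs: distance from the consumer's head to the
 * producer's tail, modulo the ring size, minus a reserved slack so head
 * and tail never become equal on a full ring. The reserve value below
 * is an assumption mirroring I915_RING_FREE_SPACE.
 */
#include <stdio.h>

#define RING_FREE_SPACE 64

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	printf("%d\n", ring_space(512, 448, 4096));	/* no wrap: 0 */
	printf("%d\n", ring_space(256, 3968, 4096));	/* wrapped: 320 */
	return 0;
}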
-static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
unsigned long end;
int ret;
@@ -1539,7 +1568,6 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
/* force the tail write in case we have been skipping them */
__intel_ring_advance(ring);
- trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
@@ -1547,12 +1575,13 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ trace_i915_ring_wait_begin(ring);
do {
- ring->head = I915_READ_HEAD(ring);
- ring->space = ring_space(ring);
- if (ring->space >= n) {
- trace_i915_ring_wait_end(ring);
- return 0;
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->space = ring_space(ring);
+ if (ringbuf->space >= n) {
+ ret = 0;
+ break;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
@@ -1564,38 +1593,49 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
msleep(1);
+ if (dev_priv->mm.interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
- return ret;
- } while (!time_after(jiffies, end));
+ break;
+
+ if (time_after(jiffies, end)) {
+ ret = -EBUSY;
+ break;
+ }
+ } while (1);
trace_i915_ring_wait_end(ring);
- return -EBUSY;
+ return ret;
}
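/*
 * The rework above funnels every exit of the wait loop through a single
 * point so the wait-end tracepoint always fires. A userspace sketch of
 * that single-exit polling shape; check_space() and interrupted() are
 * hypothetical stand-ins for the space check and signal_pending().
 */
#include <stdio.h>
#include <time.h>

static int check_space(void) { static int polls; return ++polls >= 3; }
static int interrupted(void) { return 0; }

static int wait_for_space(void)
{
	time_t end = time(NULL) + 60;	/* generous timeout, as in the driver */
	int ret;

	for (;;) {
		if (check_space()) { ret = 0; break; }		/* space appeared */
		if (interrupted()) { ret = -4; break; }		/* cf. EINTR */
		if (time(NULL) > end) { ret = -16; break; }	/* cf. EBUSY */
	}
	/* single exit: the end tracepoint would go here */
	return ret;
}

int main(void) { printf("ret=%d\n", wait_for_space()); return 0; }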
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
uint32_t __iomem *virt;
- int rem = ring->size - ring->tail;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ int rem = ringbuf->size - ringbuf->tail;
- if (ring->space < rem) {
+ if (ringbuf->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}
- virt = ring->virtual_start + ring->tail;
+ virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
- ring->tail = 0;
- ring->space = ring_space(ring);
+ ringbuf->tail = 0;
+ ringbuf->space = ring_space(ring);
return 0;
}
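/*
 * Sketch of the wrap above on a plain array: pad from the current tail
 * to the end of the ring with MI_NOOP dwords (encoded as 0), then reset
 * the tail so the next emit starts at the top.
 */
#include <stdio.h>
#include <stdint.h>

#define MI_NOOP 0u

static void wrap_ring(uint32_t *ring, int size_bytes, int *tail)
{
	int rem = (size_bytes - *tail) / 4;	/* dwords left before the end */
	uint32_t *virt = ring + *tail / 4;

	while (rem--)
		*virt++ = MI_NOOP;		/* GPU skips the padded gap */
	*tail = 0;
}

int main(void)
{
	uint32_t ring[16] = {0};
	int tail = 40;				/* byte offset: 10 dwords in */

	wrap_ring(ring, sizeof(ring), &tail);
	printf("tail=%d\n", tail);
	return 0;
}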
-int intel_ring_idle(struct intel_ring_buffer *ring)
+int intel_ring_idle(struct intel_engine_cs *ring)
{
u32 seqno;
int ret;
@@ -1619,7 +1659,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
}
static int
-intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
if (ring->outstanding_lazy_seqno)
return 0;
@@ -1637,18 +1677,19 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
-static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+static int __intel_ring_prepare(struct intel_engine_cs *ring,
int bytes)
{
+ struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
- if (unlikely(ring->tail + bytes > ring->effective_size)) {
+ if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
}
- if (unlikely(ring->space < bytes)) {
+ if (unlikely(ringbuf->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
if (unlikely(ret))
return ret;
@@ -1657,7 +1698,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
return 0;
}
-int intel_ring_begin(struct intel_ring_buffer *ring,
+int intel_ring_begin(struct intel_engine_cs *ring,
int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1677,19 +1718,20 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
- ring->space -= num_dwords * sizeof(uint32_t);
+ ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
- int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+ int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
return 0;
+ num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
@@ -1702,7 +1744,7 @@ int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
return 0;
}
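/*
 * Sketch of the new alignment arithmetic above: measure how many dwords
 * the tail is into the current cacheline, and pad only if that is
 * non-zero, so an already-aligned tail no longer gets a full cacheline
 * of MI_NOOPs. Assumes 64-byte cachelines.
 */
#include <stdio.h>
#include <stdint.h>

#define CACHELINE_BYTES 64

static int align_dwords(uint32_t tail)	/* tail is a byte offset */
{
	int num_dwords = (tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);

	if (num_dwords == 0)
		return 0;			/* already aligned */
	return CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
}

int main(void)
{
	printf("%d\n", align_dwords(64));	/* aligned -> 0 */
	printf("%d\n", align_dwords(72));	/* 2 dwords in -> pad 14 */
	return 0;
}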
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1719,7 +1761,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
ring->hangcheck.seqno = seqno;
}
-static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
u32 value)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1752,7 +1794,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
-static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
@@ -1788,8 +1830,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
+gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
unsigned flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1803,8 +1845,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
- intel_ring_emit(ring, offset);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, lower_32_bits(offset));
+ intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1812,8 +1854,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
}
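/*
 * Sketch of the 64-bit batch offset split above: the gen8 dispatch now
 * takes a u64 and emits it as two dwords. lower_32_bits()/upper_32_bits()
 * are re-implemented locally to keep this self-contained.
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t offset = 0x0000000123456000ull;	/* a >4GiB GTT address */
	uint32_t lo = lower_32_bits(offset);
	uint32_t hi = upper_32_bits(offset);

	/* the two dwords emitted after MI_BATCH_BUFFER_START_GEN8 */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	assert((((uint64_t)hi << 32) | lo) == offset);
	return 0;
}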
static int
-hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
+hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
unsigned flags)
{
int ret;
@@ -1833,8 +1875,8 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
}
static int
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
+gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+ u64 offset, u32 len,
unsigned flags)
{
int ret;
@@ -1855,7 +1897,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
@@ -1898,7 +1940,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
ring->name = "render ring";
ring->id = RCS;
@@ -1920,15 +1962,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->sync_to = gen6_ring_sync;
- ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->signal_mbox[RCS] = GEN6_NOSYNC;
- ring->signal_mbox[VCS] = GEN6_VRSYNC;
- ring->signal_mbox[BCS] = GEN6_BRSYNC;
- ring->signal_mbox[VECS] = GEN6_VERSYNC;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * These semaphores are only used on pre-gen8 platforms, and no
+ * VCS2 ring exists there, so the semaphore between RCS and VCS2
+ * is initialized as INVALID. Gen8 will initialize the semaphore
+ * between VCS2 and RCS later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
@@ -1999,16 +2050,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
+ if (ringbuf == NULL) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
+ }
+
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_ringbuf;
}
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
@@ -2043,31 +2103,39 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- ring->size = size;
- ring->effective_size = ring->size;
+ ringbuf->size = size;
+ ringbuf->effective_size = ringbuf->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
- ring->effective_size -= 128;
+ ringbuf->effective_size -= 2 * CACHELINE_BYTES;
- ring->virtual_start = ioremap_wc(start, size);
- if (ring->virtual_start == NULL) {
+ ringbuf->virtual_start = ioremap_wc(start, size);
+ if (ringbuf->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_ringbuf;
}
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_status_page(ring);
if (ret)
- return ret;
+ goto err_vstart;
}
return 0;
+
+err_vstart:
+ iounmap(ringbuf->virtual_start);
+err_ringbuf:
+ kfree(ringbuf);
+ ring->buffer = NULL;
+ return ret;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS];
ring->name = "bsd ring";
ring->id = VCS;
@@ -2096,15 +2164,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
}
- ring->sync_to = gen6_ring_sync;
- ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->signal_mbox[RCS] = GEN6_RVSYNC;
- ring->signal_mbox[VCS] = GEN6_NOSYNC;
- ring->signal_mbox[BCS] = GEN6_BVSYNC;
- ring->signal_mbox[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * These semaphores are only used on pre-gen8 platforms, and no
+ * VCS2 ring exists there, so the semaphore between VCS and VCS2
+ * is initialized as INVALID. Gen8 will initialize the semaphore
+ * between VCS2 and VCS later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
@@ -2127,10 +2204,63 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+/**
+ * Initialize the second BSD ring (VCS2).
+ * Note that this ring only exists on Broadwell GT3.
+ */
+int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+ if (INTEL_INFO(dev)->gen != 8) {
+ DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
+ return -EINVAL;
+ }
+
+ ring->name = "bds2_ring";
+ ring->id = VCS2;
+
+ ring->write_tail = ring_write_tail;
+ ring->mmio_base = GEN8_BSD2_RING_BASE;
+ ring->flush = gen6_bsd_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->irq_enable_mask =
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->dispatch_execbuffer =
+ gen8_ring_dispatch_execbuffer;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * These semaphores are only used on pre-gen8 platforms, and no
+ * bsd2 ring exists there, so every semaphore register of VCS2
+ * is initialized as invalid. Gen8 will initialize the semaphores
+ * between VCS2 and the other rings later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
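/*
 * Sketch of the semaphore mailbox tables introduced above: each engine
 * keeps per-ring wait and signal entries, with its own slot and any ring
 * absent on the platform (VCS2 before gen8) parked at invalid/no-sync.
 * The register values below are hypothetical placeholders.
 */
#include <stdio.h>

enum ring_id { RCS, VCS, BCS, VECS, VCS2, NUM_RINGS };

#define SYNC_INVALID 0	/* stands in for MI_SEMAPHORE_SYNC_INVALID / GEN6_NOSYNC */

struct mbox { unsigned wait[NUM_RINGS]; unsigned signal[NUM_RINGS]; };

static void init_mbox(struct mbox *m, enum ring_id self,
		      const unsigned *wait_regs, const unsigned *sig_regs)
{
	for (int i = 0; i < NUM_RINGS; i++) {
		m->wait[i]   = (i == self) ? SYNC_INVALID : wait_regs[i];
		m->signal[i] = (i == self) ? SYNC_INVALID : sig_regs[i];
	}
}

int main(void)
{
	/* zero entries mark rings that do not exist pre-gen8 (VCS2) */
	unsigned wait_regs[NUM_RINGS] = { 0, 0x10, 0x11, 0x12, 0 };
	unsigned sig_regs[NUM_RINGS]  = { 0, 0x20, 0x21, 0x22, 0 };
	struct mbox m;

	init_mbox(&m, RCS, wait_regs, sig_regs);
	printf("wait[VCS]=0x%x wait[VCS2]=0x%x\n", m.wait[VCS], m.wait[VCS2]);
	return 0;
}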
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_engine_cs *ring = &dev_priv->ring[BCS];
ring->name = "blitter ring";
ring->id = BCS;
@@ -2153,15 +2283,24 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
}
- ring->sync_to = gen6_ring_sync;
- ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->signal_mbox[RCS] = GEN6_RBSYNC;
- ring->signal_mbox[VCS] = GEN6_VBSYNC;
- ring->signal_mbox[BCS] = GEN6_NOSYNC;
- ring->signal_mbox[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * These semaphores are only used on pre-gen8 platforms, and no
+ * VCS2 ring exists there, so the semaphore between BCS and VCS2
+ * is initialized as INVALID. Gen8 will initialize the semaphore
+ * between BCS and VCS2 later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2170,7 +2309,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *ring = &dev_priv->ring[VECS];
ring->name = "video enhancement ring";
ring->id = VECS;
@@ -2194,22 +2333,25 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
}
- ring->sync_to = gen6_ring_sync;
- ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->signal_mbox[RCS] = GEN6_RVESYNC;
- ring->signal_mbox[VCS] = GEN6_VVESYNC;
- ring->signal_mbox[BCS] = GEN6_BVESYNC;
- ring->signal_mbox[VECS] = GEN6_NOSYNC;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
int
-intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
int ret;
@@ -2227,7 +2369,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
}
int
-intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
uint32_t flush_domains;
int ret;
@@ -2245,3 +2387,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
ring->gpu_caches_dirty = false;
return 0;
}
+
+void
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
+{
+ int ret;
+
+ if (!intel_ring_initialized(ring))
+ return;
+
+ ret = intel_ring_idle(ring);
+ if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ stop_ring(ring);
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2b91c4b4d34b..e72017bdcd7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,10 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+#include <linux/hashtable.h>
+
+#define I915_CMD_HASH_ORDER 9
+
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -51,79 +55,96 @@ struct intel_ring_hangcheck {
u32 seqno;
int score;
enum intel_ring_hangcheck_action action;
- bool deadlock;
+ int deadlock;
};
-struct intel_ring_buffer {
+struct intel_ringbuffer {
+ struct drm_i915_gem_object *obj;
+ void __iomem *virtual_start;
+
+ u32 head;
+ u32 tail;
+ int space;
+ int size;
+ int effective_size;
+
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+};
+
+struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
VCS,
BCS,
VECS,
+ VCS2
} id;
-#define I915_NUM_RINGS 4
+#define I915_NUM_RINGS 5
+#define LAST_USER_RING (VECS + 1)
u32 mmio_base;
- void __iomem *virtual_start;
struct drm_device *dev;
- struct drm_i915_gem_object *obj;
+ struct intel_ringbuffer *buffer;
- u32 head;
- u32 tail;
- int space;
- int size;
- int effective_size;
struct intel_hw_status_page status_page;
- /** We track the position of the requests in the ring buffer, and
- * when each is retired we increment last_retired_head as the GPU
- * must have finished processing the request and so we know we
- * can advance the ringbuffer up to that position.
- *
- * last_retired_head is set to -1 after the value is consumed so
- * we can detect new retirements.
- */
- u32 last_retired_head;
-
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
- u32 sync_seqno[I915_NUM_RINGS-1];
- bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
- void (*irq_put)(struct intel_ring_buffer *ring);
+ bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+ void (*irq_put)(struct intel_engine_cs *ring);
- int (*init)(struct intel_ring_buffer *ring);
+ int (*init)(struct intel_engine_cs *ring);
- void (*write_tail)(struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
- int __must_check (*flush)(struct intel_ring_buffer *ring,
+ int __must_check (*flush)(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring);
+ int (*add_request)(struct intel_engine_cs *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
- u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ u32 (*get_seqno)(struct intel_engine_cs *ring,
bool lazy_coherency);
- void (*set_seqno)(struct intel_ring_buffer *ring,
+ void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
- int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- u32 offset, u32 length,
+ int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
+ u64 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
- void (*cleanup)(struct intel_ring_buffer *ring);
- int (*sync_to)(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *to,
- u32 seqno);
+ void (*cleanup)(struct intel_engine_cs *ring);
- /* our mbox written by others */
- u32 semaphore_register[I915_NUM_RINGS];
- /* mboxes this ring signals to */
- u32 signal_mbox[I915_NUM_RINGS];
+ struct {
+ u32 sync_seqno[I915_NUM_RINGS-1];
+
+ struct {
+ /* our mbox written by others */
+ u32 wait[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal[I915_NUM_RINGS];
+ } mbox;
+
+ /* AKA wait() */
+ int (*sync_to)(struct intel_engine_cs *ring,
+ struct intel_engine_cs *to,
+ u32 seqno);
+ int (*signal)(struct intel_engine_cs *signaller,
+ /* num_dwords needed by caller */
+ unsigned int num_dwords);
+ } semaphore;
/**
* List of objects currently involved in rendering from the
@@ -153,12 +174,8 @@ struct intel_ring_buffer {
wait_queue_head_t irq_queue;
- /**
- * Do an explicit TLB flush before MI_SET_CONTEXT
- */
- bool itlb_before_ctx_switch;
- struct i915_hw_context *default_context;
- struct i915_hw_context *last_context;
+ struct intel_context *default_context;
+ struct intel_context *last_context;
struct intel_ring_hangcheck hangcheck;
@@ -168,12 +185,13 @@ struct intel_ring_buffer {
volatile u32 *cpu_page;
} scratch;
+ bool needs_cmd_parser;
+
/*
- * Tables of commands the command parser needs to know about
+ * Table of commands the command parser needs to know about
* for this ring.
*/
- const struct drm_i915_cmd_table *cmd_tables;
- int cmd_table_count;
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
/*
* Table of registers allowed in commands that read/write registers.
@@ -202,20 +220,20 @@ struct intel_ring_buffer {
};
static inline bool
-intel_ring_initialized(struct intel_ring_buffer *ring)
+intel_ring_initialized(struct intel_engine_cs *ring)
{
- return ring->obj != NULL;
+ return ring->buffer && ring->buffer->obj;
}
static inline unsigned
-intel_ring_flag(struct intel_ring_buffer *ring)
+intel_ring_flag(struct intel_engine_cs *ring)
{
return 1 << ring->id;
}
static inline u32
-intel_ring_sync_index(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *other)
+intel_ring_sync_index(struct intel_engine_cs *ring,
+ struct intel_engine_cs *other)
{
int idx;
@@ -233,7 +251,7 @@ intel_ring_sync_index(struct intel_ring_buffer *ring,
}
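/*
 * Sketch of the sync_seqno[] index mapping computed by
 * intel_ring_sync_index() above (its body is elided in this hunk): each
 * engine tracks the other NUM_RINGS-1 engines in ring order starting
 * just after itself, the usual distance-minus-one-mod-N scheme.
 */
#include <stdio.h>

#define NUM_RINGS 5

static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;	/* distance minus one ... */
	if (idx < 0)
		idx += NUM_RINGS;	/* ... modulo the ring count */
	return idx;
}

int main(void)
{
	/* from ring 1's point of view: rings 2,3,4,0 map to slots 0..3 */
	for (int other = 0; other < NUM_RINGS; other++)
		if (other != 1)
			printf("other=%d -> idx=%d\n", other, sync_index(1, other));
	return 0;
}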
static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
+intel_read_status_page(struct intel_engine_cs *ring,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
@@ -242,7 +260,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
}
static inline void
-intel_write_status_page(struct intel_ring_buffer *ring,
+intel_write_status_page(struct intel_engine_cs *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
@@ -267,47 +285,51 @@ intel_write_status_page(struct intel_ring_buffer *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+void intel_stop_ring_buffer(struct intel_engine_cs *ring);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
-int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
-static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
- iowrite32(data, ring->virtual_start + ring->tail);
- ring->tail += 4;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+ ringbuf->tail += 4;
}
-static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
- ring->tail &= ring->size - 1;
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+ ringbuf->tail &= ringbuf->size - 1;
}
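/*
 * Sketch of the emit/advance contract above on a plain array: emit
 * writes one dword at the tail and bumps it; advance masks the tail
 * back into the ring, relying on the size being a power of two.
 */
#include <stdio.h>
#include <stdint.h>

struct ringbuf { uint32_t *virt; uint32_t tail; uint32_t size; };

static void ring_emit(struct ringbuf *rb, uint32_t data)
{
	rb->virt[rb->tail / 4] = data;	/* iowrite32() in the driver */
	rb->tail += 4;
}

static void ring_advance(struct ringbuf *rb)
{
	rb->tail &= rb->size - 1;	/* power-of-two wrap */
}

int main(void)
{
	uint32_t store[4] = {0};
	struct ringbuf rb = { store, 12, sizeof(store) };

	ring_emit(&rb, 0xdeadbeef);	/* lands in the last slot */
	ring_advance(&rb);		/* tail wraps 16 -> 0 */
	printf("tail=%u slot3=0x%x\n", rb.tail, store[3]);
	return 0;
}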
-void __intel_ring_advance(struct intel_ring_buffer *ring);
+void __intel_ring_advance(struct intel_engine_cs *ring);
-int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
-int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
+int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+void intel_ring_setup_status_page(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
{
- return ring->tail;
+ return ring->buffer->tail;
}
-static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
-static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 46be00d66df3..20375cc7f82d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1153,20 +1153,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
/* FIXME: This bit is only valid when using TMDS encoding and 8
* bit per color mode. */
- if (intel_sdvo->has_hdmi_monitor &&
+ if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
- intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
- else
- intel_sdvo->color_range = 0;
+ pipe_config->limited_color_range = true;
+ } else {
+ if (pipe_config->has_hdmi_sink &&
+ intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
}
- if (intel_sdvo->color_range)
- pipe_config->limited_color_range = true;
-
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
@@ -1174,7 +1175,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1223,7 +1224,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->has_hdmi_monitor) {
+ if (crtc->config.has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
@@ -1258,8 +1259,8 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
- sdvox |= intel_sdvo->color_range;
+ if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+ sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
@@ -1349,6 +1350,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
u8 val;
bool ret;
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
/* Some sdvo encoders are not spec compliant and don't
@@ -1377,13 +1380,14 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* other platforms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
- dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+ dotclock = pipe_config->port_clock;
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
@@ -1406,6 +1410,15 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
+ if (sdvox & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &val, 1)) {
+ if (val == SDVO_ENCODE_HDMI)
+ pipe_config->has_hdmi_sink = true;
+ }
+
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1732,7 +1745,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
@@ -1794,7 +1807,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
/* set the bus switch and get the modes */
edid = intel_sdvo_get_edid(connector);
@@ -1892,7 +1905,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
/* Read the list of supported input resolutions for the selected TV
* format.
@@ -1929,7 +1942,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_display_mode *newmode;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, drm_get_connector_name(connector));
+ connector->base.id, connector->name);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2999,7 +3012,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
- intel_encoder->mode_set = intel_sdvo_mode_set;
+ intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0954f132726e..01d841ea3140 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -29,12 +29,21 @@
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
* VLV_VLV2_PUNIT_HAS_0.8.docx
*/
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP 0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP 0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP 0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP 0x07
+
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
u32 cmd, be = 0xf, bar = 0;
- bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
- opcode == DPIO_OPCODE_REG_READ);
+ bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
@@ -74,7 +83,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
- PUNIT_OPCODE_REG_READ, addr, &val);
+ SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
@@ -86,7 +95,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
- PUNIT_OPCODE_REG_WRITE, addr, &val);
+ SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
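/*
 * Sketch of the single-primitive sideband shape above: every accessor
 * funnels through one rw helper that distinguishes reads from writes by
 * opcode. A flat array stands in for the IOSF mailbox hardware, and the
 * devfn/port plumbing is omitted.
 */
#include <stdio.h>
#include <stdint.h>

#define SB_CRRDDA_NP 0x06	/* private register read, dword addressing */
#define SB_CRWRDA_NP 0x07	/* private register write, dword addressing */

static uint32_t regs[256];	/* stand-in for the sideband endpoint */

static int sideband_rw(uint32_t opcode, uint32_t addr, uint32_t *val)
{
	int is_read = (opcode == SB_CRRDDA_NP);

	if (is_read)
		*val = regs[addr & 0xff];
	else
		regs[addr & 0xff] = *val;
	return 0;
}

static uint32_t punit_read(uint32_t addr)
{
	uint32_t val = 0;
	sideband_rw(SB_CRRDDA_NP, addr, &val);
	return val;
}

static void punit_write(uint32_t addr, uint32_t val)
{
	sideband_rw(SB_CRWRDA_NP, addr, &val);
}

int main(void)
{
	punit_write(0x42, 123);
	printf("0x42 -> %u\n", punit_read(0x42));
	return 0;
}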
@@ -95,7 +104,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
@@ -103,7 +112,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
@@ -114,7 +123,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
- PUNIT_OPCODE_REG_READ, addr, &val);
+ SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
@@ -124,56 +133,56 @@ u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
- PUNIT_OPCODE_REG_READ, reg, &val);
+ SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
- PUNIT_OPCODE_REG_WRITE, reg, &val);
+ SB_CRWRDA_NP, reg, &val);
}
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
@@ -181,14 +190,22 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- DPIO_OPCODE_REG_READ, reg, &val);
+ SB_MRD_NP, reg, &val);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
+
return val;
}
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- DPIO_OPCODE_REG_WRITE, reg, &val);
+ SB_MWR_NP, reg, &val);
}
/* SBI access */
@@ -253,13 +270,13 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
- DPIO_OPCODE_REG_READ, reg, &val);
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
return val;
}
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
- DPIO_OPCODE_REG_WRITE, reg, &val);
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 336ae6c602f2..9a17b4e92ef4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,106 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
+{
+ /* paranoia */
+ if (!mode->crtc_htotal)
+ return 1;
+
+ return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
+}
+
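/*
 * Worked sketch of usecs_to_scanlines() above: crtc_clock is in kHz, so
 * usecs * crtc_clock pixels elapse per 1000 us, and every crtc_htotal
 * pixels is one scanline. For a 1080p timing (148500 kHz, htotal 2200),
 * 100 us rounds up to 7 scanlines.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int usecs_to_scanlines(int crtc_clock_khz, int crtc_htotal, int usecs)
{
	if (!crtc_htotal)	/* paranoia, as in the driver */
		return 1;
	return DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * crtc_htotal);
}

int main(void)
{
	printf("%d scanlines\n", usecs_to_scanlines(148500, 2200, 100));
	return 0;
}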
+static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+{
+ struct drm_device *dev = crtc->base.dev;
+ const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ DEFINE_WAIT(wait);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
+
+ vblank_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ /* FIXME needs to be calibrated sensibly */
+ min = vblank_start - usecs_to_scanlines(mode, 100);
+ max = vblank_start - 1;
+
+ if (min <= 0 || max <= 0)
+ return false;
+
+ if (WARN_ON(drm_vblank_get(dev, pipe)))
+ return false;
+
+ local_irq_disable();
+
+ trace_i915_pipe_update_start(crtc, min, max);
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < min || scanline > max)
+ break;
+
+ if (timeout <= 0) {
+ DRM_ERROR("Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(&crtc->vbl_wait, &wait);
+
+ drm_vblank_put(dev, pipe);
+
+ *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+
+ trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
+
+ return true;
+}
+
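/*
 * Sketch of just the evasion-window test from intel_pipe_update_start()
 * above: the forbidden region is the last ~100 us of the frame before
 * vblank_start, and the driver waits (IRQs off around the check) while
 * the beam is inside it so the register writes land atomically in one
 * frame. read_scanline() is a hypothetical stand-in for
 * intel_get_crtc_scanline().
 */
#include <stdio.h>

static int read_scanline(void) { return 1020; }

/* 1 while the scanline sits inside [min, max], i.e. while we must wait */
static int in_evasion_window(int min, int max)
{
	int scanline = read_scanline();
	return scanline >= min && scanline <= max;
}

int main(void)
{
	int vblank_start = 1084;	/* 1080p: vblank begins at line 1084 */
	int min = vblank_start - 7;	/* 100 us ~= 7 scanlines, see above */
	int max = vblank_start - 1;

	printf("must wait: %d\n", in_evasion_window(min, max));
	return 0;
}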
+static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+{
+ struct drm_device *dev = crtc->base.dev;
+ enum pipe pipe = crtc->pipe;
+ u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+
+ trace_i915_pipe_update_end(crtc, end_vbl_count);
+
+ local_irq_enable();
+
+ if (start_vbl_count != end_vbl_count)
+ DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
+ pipe_name(pipe), start_vbl_count, end_vbl_count);
+}
+
+static void intel_update_primary_plane(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ int reg = DSPCNTR(crtc->plane);
+
+ if (crtc->primary_enabled)
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+ else
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -48,11 +148,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ u32 start_vbl_count;
+ bool atomic_update;
sprctl = I915_READ(SPCNTR(pipe, plane));
@@ -131,6 +234,10 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -143,7 +250,11 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
- POSTING_READ(SPSURF(pipe, plane));
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -152,14 +263,25 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
- POSTING_READ(SPSURF(pipe, plane));
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
}
@@ -226,10 +348,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ u32 start_vbl_count;
+ bool atomic_update;
sprctl = I915_READ(SPRCTL(pipe));
@@ -299,6 +424,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -317,7 +446,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
- POSTING_READ(SPRSURF(pipe));
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -326,7 +459,14 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
@@ -334,7 +474,11 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_WRITE(SPRSURF(pipe), 0);
- POSTING_READ(SPRSURF(pipe));
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
/*
* Avoid underruns when disabling the sprite.
@@ -410,10 +554,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ u32 start_vbl_count;
+ bool atomic_update;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -478,6 +625,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
+
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -491,7 +642,11 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
- POSTING_READ(DVSSURF(pipe));
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void
@@ -500,14 +655,25 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
+ u32 start_vbl_count;
+ bool atomic_update;
+
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
+ intel_update_primary_plane(intel_crtc);
I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
- POSTING_READ(DVSSURF(pipe));
+
+ intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
/*
* Avoid underruns when disabling the sprite.
@@ -519,20 +685,18 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
}
static void
-intel_enable_primary(struct drm_crtc *crtc)
+intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int reg = DSPCNTR(intel_crtc->plane);
- if (intel_crtc->primary_enabled)
- return;
-
- intel_crtc->primary_enabled = true;
-
- I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/*
* FIXME IPS should be fine as long as one plane is
@@ -540,10 +704,7 @@ intel_enable_primary(struct drm_crtc *crtc)
* when going from primary only to sprite only and vice
* versa.
*/
- if (intel_crtc->config.ips_enabled) {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- hsw_enable_ips(intel_crtc);
- }
+ hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
@@ -551,17 +712,11 @@ intel_enable_primary(struct drm_crtc *crtc)
}
static void
-intel_disable_primary(struct drm_crtc *crtc)
+intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int reg = DSPCNTR(intel_crtc->plane);
-
- if (!intel_crtc->primary_enabled)
- return;
-
- intel_crtc->primary_enabled = false;
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
@@ -575,9 +730,6 @@ intel_disable_primary(struct drm_crtc *crtc)
* versa.
*/
hsw_disable_ips(intel_crtc);
-
- I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
static int
@@ -671,7 +823,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
- bool disable_primary = false;
+ bool primary_enabled;
bool visible;
int hscale, vscale;
int max_scale, min_scale;
@@ -842,8 +994,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
- disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
- WARN_ON(disable_primary && !visible && intel_crtc->active);
+ primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
+ WARN_ON(!primary_enabled && !visible && intel_crtc->active);
mutex_lock(&dev->struct_mutex);
@@ -870,12 +1022,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_plane->obj = obj;
if (intel_crtc->active) {
- /*
- * Be sure to re-enable the primary before the sprite is no longer
- * covering it fully.
- */
- if (!disable_primary)
- intel_enable_primary(crtc);
+ bool primary_was_enabled = intel_crtc->primary_enabled;
+
+ intel_crtc->primary_enabled = primary_enabled;
+
+ if (primary_was_enabled != primary_enabled)
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ if (primary_was_enabled && !primary_enabled)
+ intel_pre_disable_primary(crtc);
if (visible)
intel_plane->update_plane(plane, crtc, fb, obj,
@@ -884,8 +1039,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
intel_plane->disable_plane(plane, crtc);
- if (disable_primary)
- intel_disable_primary(crtc);
+ if (!primary_was_enabled && primary_enabled)
+ intel_post_enable_primary(crtc);
}
/* Unpin old obj after new one is active to avoid ugliness */
@@ -923,8 +1078,14 @@ intel_disable_plane(struct drm_plane *plane)
intel_crtc = to_intel_crtc(plane->crtc);
if (intel_crtc->active) {
- intel_enable_primary(plane->crtc);
+ bool primary_was_enabled = intel_crtc->primary_enabled;
+
+ intel_crtc->primary_enabled = true;
+
intel_plane->disable_plane(plane, plane->crtc);
+
+ if (!primary_was_enabled && intel_crtc->primary_enabled)
+ intel_post_enable_primary(plane->crtc);
}
if (intel_plane->obj) {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index bafe92e317d5..67c6c9a2eb1c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -934,7 +934,86 @@ intel_tv_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_tv_mode_set(struct intel_encoder *encoder)
+static void
+set_tv_mode_timings(struct drm_i915_private *dev_priv,
+ const struct tv_mode *tv_mode,
+ bool burst_ena)
+{
+ u32 hctl1, hctl2, hctl3;
+ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+
+ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+ (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+ hctl2 = (tv_mode->hburst_start << 16) |
+ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+ if (burst_ena)
+ hctl2 |= TV_BURST_ENA;
+
+ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+ if (tv_mode->veq_ena)
+ vctl3 |= TV_EQUAL_ENA;
+
+ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+ I915_WRITE(TV_H_CTL_1, hctl1);
+ I915_WRITE(TV_H_CTL_2, hctl2);
+ I915_WRITE(TV_H_CTL_3, hctl3);
+ I915_WRITE(TV_V_CTL_1, vctl1);
+ I915_WRITE(TV_V_CTL_2, vctl2);
+ I915_WRITE(TV_V_CTL_3, vctl3);
+ I915_WRITE(TV_V_CTL_4, vctl4);
+ I915_WRITE(TV_V_CTL_5, vctl5);
+ I915_WRITE(TV_V_CTL_6, vctl6);
+ I915_WRITE(TV_V_CTL_7, vctl7);
+}
+
+static void set_color_conversion(struct drm_i915_private *dev_priv,
+ const struct color_conversion *color_conversion)
+{
+ if (!color_conversion)
+ return;
+
+ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+ color_conversion->gy);
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ color_conversion->ay);
+ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+ color_conversion->gu);
+ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+ color_conversion->au);
+ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+ color_conversion->gv);
+ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+ color_conversion->av);
+}
+
+static void intel_tv_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -942,14 +1021,13 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
- u32 hctl1, hctl2, hctl3;
- u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
u32 scctl1, scctl2, scctl3;
int i, j;
const struct video_levels *video_levels;
const struct color_conversion *color_conversion;
bool burst_ena;
- int pipe = intel_crtc->pipe;
+ int xpos = 0x0, ypos = 0x0;
+ unsigned int xsize, ysize;
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
@@ -982,44 +1060,6 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
burst_ena = tv_mode->burst_ena;
break;
}
- hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
- (tv_mode->htotal << TV_HTOTAL_SHIFT);
-
- hctl2 = (tv_mode->hburst_start << 16) |
- (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
-
- if (burst_ena)
- hctl2 |= TV_BURST_ENA;
-
- hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
- (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
-
- vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
- (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
- (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
-
- vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
- (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
- (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
-
- vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
- (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
- (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
-
- if (tv_mode->veq_ena)
- vctl3 |= TV_EQUAL_ENA;
-
- vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
- (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
-
- vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
- (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
-
- vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
- (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
-
- vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
- (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
if (intel_crtc->pipe == 1)
tv_ctl |= TV_ENC_PIPEB_SELECT;
@@ -1051,37 +1091,16 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (dev->pdev->device < 0x2772)
+ if (IS_I915GM(dev))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
- I915_WRITE(TV_H_CTL_1, hctl1);
- I915_WRITE(TV_H_CTL_2, hctl2);
- I915_WRITE(TV_H_CTL_3, hctl3);
- I915_WRITE(TV_V_CTL_1, vctl1);
- I915_WRITE(TV_V_CTL_2, vctl2);
- I915_WRITE(TV_V_CTL_3, vctl3);
- I915_WRITE(TV_V_CTL_4, vctl4);
- I915_WRITE(TV_V_CTL_5, vctl5);
- I915_WRITE(TV_V_CTL_6, vctl6);
- I915_WRITE(TV_V_CTL_7, vctl7);
+ set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+
I915_WRITE(TV_SC_CTL_1, scctl1);
I915_WRITE(TV_SC_CTL_2, scctl2);
I915_WRITE(TV_SC_CTL_3, scctl3);
- if (color_conversion) {
- I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
- color_conversion->gy);
- I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
- color_conversion->ay);
- I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
- color_conversion->gu);
- I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
- color_conversion->au);
- I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
- color_conversion->gv);
- I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
- color_conversion->av);
- }
+ set_color_conversion(dev_priv, color_conversion);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
@@ -1092,46 +1111,25 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
I915_WRITE(TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- {
- int pipeconf_reg = PIPECONF(pipe);
- int dspcntr_reg = DSPCNTR(intel_crtc->plane);
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int xpos = 0x0, ypos = 0x0;
- unsigned int xsize, ysize;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /* Wait for vblank for the disable to take effect */
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_pipe_off(dev, intel_crtc->pipe);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
- xsize = tv_mode->hblank_start - tv_mode->hblank_end;
- if (tv_mode->progressive)
- ysize = tv_mode->nbr_end + 1;
- else
- ysize = 2*tv_mode->nbr_end + 1;
-
- xpos += intel_tv->margin[TV_MARGIN_LEFT];
- ypos += intel_tv->margin[TV_MARGIN_TOP];
- xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
- intel_tv->margin[TV_MARGIN_RIGHT]);
- ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
- intel_tv->margin[TV_MARGIN_BOTTOM]);
- I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
- I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
-
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
- }
+
+ assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ if (tv_mode->progressive)
+ ysize = tv_mode->nbr_end + 1;
+ else
+ ysize = 2*tv_mode->nbr_end + 1;
+
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
j = 0;
for (i = 0; i < 60; i++)
@@ -1316,17 +1314,18 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, drm_get_connector_name(connector),
+ connector->base.id, connector->name,
force);
mode = reported_modes[0];
if (force) {
struct intel_load_detect_pipe tmp;
+ struct drm_modeset_acquire_ctx ctx;
- if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
return connector_status_unknown;
} else
@@ -1634,7 +1633,7 @@ intel_tv_init(struct drm_device *dev)
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->get_config = intel_tv_get_config;
- intel_encoder->mode_set = intel_tv_mode_set;
+ intel_encoder->pre_enable = intel_tv_pre_enable;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
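Note the vertical size computation intel_tv_pre_enable() inherits from the old mode_set path: progressive modes scan one field of active lines, interlaced modes two fields plus the odd half-line. With illustrative nbr_end values (not taken from a real tv_mode table):

    unsigned int ysize_480p = 479 + 1;      /* progressive: nbr_end + 1 = 480 lines */
    unsigned int ysize_480i = 2 * 242 + 1;  /* interlaced: 2 * nbr_end + 1 = 485 lines */

The margins are then added to xpos/ypos and subtracted from xsize/ysize before TV_WIN_POS and TV_WIN_SIZE are written, exactly as in the hunk above.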
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d0c75779d3f6..4f6fef7ac069 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -255,8 +255,7 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
}
-void vlv_force_wake_get(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
@@ -275,8 +274,7 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void vlv_force_wake_put(struct drm_i915_private *dev_priv,
- int fw_engine)
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
@@ -322,7 +320,8 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- del_timer_sync(&dev_priv->uncore.force_wake_timer);
+ if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
+ gen6_force_wake_timer((unsigned long)dev_priv);
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
@@ -374,7 +373,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- if (IS_HASWELL(dev) &&
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
@@ -395,26 +394,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
void intel_uncore_sanitize(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg_val;
-
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
-
- /* Turn off power gate, require especially for the BIOS less system */
- if (IS_VALLEYVIEW(dev)) {
-
- mutex_lock(&dev_priv->rps.hw_lock);
- reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
-
- if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
- PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
- PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- }
}
/*
@@ -488,6 +469,17 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+ ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+ ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+ ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+ (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+ ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+ ((reg) >= 0x30000 && (reg) < 0x40000))
+
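These range macros classify a VLV register offset by the forcewake engine it needs. Reusing them as written, a host-side sanity check (illustrative only; nothing like this is compiled into the kernel):

    assert(FORCEWAKE_VLV_RENDER_RANGE_OFFSET(0x2030));   /* render ring TAIL, 0x2000 + 0x30 */
    assert(FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(0x1208c));   /* e.g. a BSD ring register */
    assert(!FORCEWAKE_VLV_RENDER_RANGE_OFFSET(0x40000)); /* beyond forcewake space */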
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -854,12 +846,15 @@ void intel_uncore_fini(struct drm_device *dev)
intel_uncore_forcewake_reset(dev, false);
}
+#define GEN_RANGE(l, h) GENMASK(h, l)
+
static const struct register_whitelist {
uint64_t offset;
uint32_t size;
- uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+ /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+ uint32_t gen_bitmask;
} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};
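GEN_RANGE(4, 8) expands to GENMASK(8, 4), i.e. bits 4 through 8 set, which is 0x1F0 — exactly the magic literal the old whitelist entry carried. A compile-time check one could add to convince oneself:

    BUILD_BUG_ON(GEN_RANGE(4, 8) != 0x1F0);  /* GENMASK(8, 4) == 0b1'1111'0000 */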
int i915_reg_read_ioctl(struct drm_device *dev,
@@ -911,7 +906,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
- struct i915_hw_context *ctx;
+ struct intel_context *ctx;
int ret;
if (args->flags || args->pad)
@@ -955,6 +950,9 @@ static int i965_do_reset(struct drm_device *dev)
{
int ret;
+ /* FIXME: i965g/gm need a display save/restore for gpu reset. */
+ return -ENODEV;
+
/*
* Set the domains we want to reset (GRDOM/bits 2 and 3) as
* well as the reset bit (GR/bit 0). Setting the GR bit
@@ -966,7 +964,6 @@ static int i965_do_reset(struct drm_device *dev)
if (ret)
return ret;
- /* We can't reset render&media without also resetting display ... */
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
@@ -979,26 +976,58 @@ static int i965_do_reset(struct drm_device *dev)
return 0;
}
+static int g4x_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst;
int ret;
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+ ILK_GRDOM_RESET_ENABLE) == 0, 500);
if (ret)
return ret;
- /* We can't reset render&media without also resetting display ... */
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+ ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ if (ret)
+ return ret;
+
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
+
+ return 0;
}
static int gen6_do_reset(struct drm_device *dev)
@@ -1029,7 +1058,11 @@ int intel_gpu_reset(struct drm_device *dev)
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
- case 4: return i965_do_reset(dev);
+ case 4:
+ if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else
+ return i965_do_reset(dev);
default: return -ENODEV;
}
}
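All of these reset paths poll through i915's wait_for(COND, MS) macro, which re-evaluates COND roughly once per millisecond until it holds or MS milliseconds pass, returning 0 or -ETIMEDOUT. A sketch of that shape (not the exact i915 implementation; assumes <linux/jiffies.h> and <linux/delay.h>):

    #define poll_timeout(COND, MS) ({                                   \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);   \
            int ret__ = 0;                                              \
            while (!(COND)) {                                           \
                    if (time_after(jiffies, timeout__)) {               \
                            ret__ = (COND) ? 0 : -ETIMEDOUT;            \
                            break;                                      \
                    }                                                   \
                    msleep(1);                                          \
            }                                                           \
            ret__;                                                      \
    })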
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 86b4bb804852..729bfd56b55f 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -214,7 +214,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 314685b7f41f..792f924496fc 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1020,7 +1020,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
switch (param->param) {
case MGA_PARAM_IRQ_NR:
- value = drm_dev_to_irq(dev);
+ value = dev->pdev->irq;
break;
case MGA_PARAM_CARD_TYPE:
value = dev_priv->chipset;
@@ -1099,4 +1099,4 @@ const struct drm_ioctl_desc mga_ioctls[] = {
DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
-int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
+int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
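DRM_ARRAY_SIZE was a drm-local duplicate of the generic kernel helper, hence the mechanical conversion here. The classic definition (the kernel's version additionally rejects non-array arguments at compile time):

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))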
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 26868e5c55b0..f6b283b8375e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -322,17 +322,13 @@ static void mgag200_bo_unref(struct mgag200_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
-
+ *bo = NULL;
}
void mgag200_gem_free_object(struct drm_gem_object *obj)
{
struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
- if (!mgag200_bo)
- return;
mgag200_bo_unref(&mgag200_bo);
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b6984971ce0c..f12388967856 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on MSM_IOMMU
- depends on ARCH_MSM8960 || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5e1e6b0cd8ac..93ca49c8df44 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -34,6 +34,8 @@ msm-y := \
msm_gem_submit.o \
msm_gpu.o \
msm_iommu.o \
+ msm_perf.o \
+ msm_rd.o \
msm_ringbuffer.o
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index f20fbde5dc49..942e09d898a8 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -207,11 +207,11 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters: */
gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
- /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS
- * we will use this to augment our hang detection:
- */
- gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
- SP_FS_FULL_ALU_INSTRUCTIONS);
+ /* Enable the perfcntrs that we use.. */
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
+ }
gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
@@ -465,6 +465,13 @@ static const struct adreno_gpu_funcs funcs = {
},
};
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+ { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
+ SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
+ { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
+ SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
+};
+
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
@@ -504,6 +511,9 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ae750f6928c1..7f7aadef8a82 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
config.phy_init = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.pwr_reg_names = pwr_reg_names;
config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9fafee6a3e43..9d7723c6528a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
/* clks that need to be on for hpd: */
const char **hpd_clk_names;
+ const long unsigned *hpd_freq;
int hpd_clk_cnt;
/* clks that need to be on for screen pwr (ie pixel clk): */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 7dedfdd12075..28f7e3ec6c28 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
@@ -247,36 +255,49 @@ void hdmi_connector_irq(struct drm_connector *connector)
}
}
+static enum drm_connector_status detect_reg(struct hdmi *hdmi)
+{
+ uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ return gpio_get_value(config->hpd_gpio) ?
+ connector_status_connected :
+ connector_status_disconnected;
+}
+
static enum drm_connector_status hdmi_connector_detect(
struct drm_connector *connector, bool force)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- uint32_t hpd_int_status;
+ enum drm_connector_status stat_gpio, stat_reg;
int retry = 20;
- hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ do {
+ stat_gpio = detect_gpio(hdmi);
+ stat_reg = detect_reg(hdmi);
- /* sense seems to in some cases be momentarily de-asserted, don't
- * let that trick us into thinking the monitor is gone:
- */
- while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
- /* hdmi debounce logic seems to get stuck sometimes,
- * read directly the gpio to get a second opinion:
- */
- if (gpio_get_value(config->hpd_gpio)) {
- DBG("gpio tells us we are connected!");
- hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
+ if (stat_gpio == stat_reg)
break;
- }
+
mdelay(10);
- hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
- DBG("status=%08x", hpd_int_status);
+ } while (--retry);
+
+ /* the status we get from reading the gpio seems to be more reliable,
+ * so trust it most if we didn't manage to get the hdmi and
+ * gpio status to agree:
+ */
+ if (stat_gpio != stat_reg) {
+ DBG("HDMI_HPD_INT_STATUS tells us: %d", stat_reg);
+ DBG("hpd gpio tells us: %d", stat_gpio);
}
- return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
- connector_status_connected : connector_status_disconnected;
+ return stat_gpio;
}
static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -389,7 +410,8 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
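The new hpd_freq table lets hpd_enable() pin core_clk at 19.2 MHz before the clocks are turned on; a zero entry means the clock has no rate requirement. The idiom as a standalone sketch (enable_clk_at is a hypothetical helper, not part of the patch):

    static int enable_clk_at(struct clk *clk, unsigned long freq)
    {
            int ret;

            if (freq) {
                    ret = clk_set_rate(clk, freq);
                    if (ret)
                            return ret;  /* hpd_enable() only warns and continues */
            }
            return clk_prepare_enable(clk);
    }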
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index ef9957dbac94..74cebb51e8c2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -217,8 +217,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
-
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6ea10bdb6e8f..ebe2e60f3ab1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -195,8 +195,6 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
-
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ee8446c1b5f6..71510ee26e96 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -20,6 +20,10 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
+static const char *iommu_ports[] = {
+ "mdp_0",
+};
+
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp5_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
kfree(mdp5_kms);
}
@@ -216,10 +226,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
@@ -280,12 +286,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
- get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
- get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
- get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
- get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
- get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
if (ret)
goto fail;
@@ -307,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
+ dev_err(dev->dev, "failed to init iommu: %d\n", ret);
goto fail;
}
+
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
- if (ret)
+ if (ret) {
+ dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+ mmu->funcs->destroy(mmu);
goto fail;
+ }
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
mmu = NULL;
}
+ mdp5_kms->mmu = mmu;
mdp5_kms->id = msm_register_mmu(dev, mmu);
if (mdp5_kms->id < 0) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c8b1a2522c25..6e981b692d1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -33,6 +33,7 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
+ struct msm_mmu *mmu;
/* for tracking smp allocation amongst pipes: */
mdp5_smp_state_t smp_state;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 47f7bbb9c15a..f3daec4412ad 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -85,8 +85,11 @@ static int mdp5_plane_disable(struct drm_plane *plane)
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct msm_drm_private *priv = plane->dev->dev_private;
+
+ if (priv->kms)
+ mdp5_plane_disable(plane);
- mdp5_plane_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f9de156b9e65..9a5d87db5c23 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
- const static struct of_device_id match_types[] = { {
+ static const struct of_device_id match_types[] = { {
.compatible = "qcom,mdss_mdp",
.data = (void *)5,
}, {
@@ -220,7 +220,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* is bogus, but non-null if allocation succeeded:
*/
p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, 0, &attrs);
+ &priv->vram.paddr, GFP_KERNEL, &attrs);
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
@@ -288,7 +288,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
}
pm_runtime_get_sync(dev->dev);
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
@@ -299,6 +299,10 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->fbdev = msm_fbdev_init(dev);
#endif
+ ret = msm_debugfs_late_init(dev);
+ if (ret)
+ goto fail;
+
drm_kms_helper_poll_init(dev);
return 0;
@@ -382,11 +386,8 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
static void msm_lastclose(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev) {
- drm_modeset_lock_all(dev);
- drm_fb_helper_restore_fbdev_mode(priv->fbdev);
- drm_modeset_unlock_all(dev);
- }
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
static irqreturn_t msm_irq(int irq, void *arg)
@@ -531,6 +532,41 @@ static struct drm_info_list msm_debugfs_list[] = {
{ "fb", show_locked, 0, msm_fb_show },
};
+static int late_init_minor(struct drm_minor *minor)
+{
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ ret = msm_rd_debugfs_init(minor);
+ if (ret) {
+ dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ return ret;
+ }
+
+ ret = msm_perf_debugfs_init(minor);
+ if (ret) {
+ dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_debugfs_late_init(struct drm_device *dev)
+{
+ int ret;
+ ret = late_init_minor(dev->primary);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->render);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->control);
+ return ret;
+}
+
static int msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
@@ -545,13 +581,17 @@ static int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- return ret;
+ return 0;
}
static void msm_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list), minor);
+ if (!minor->dev->dev_private)
+ return;
+ msm_rd_debugfs_cleanup(minor);
+ msm_perf_debugfs_cleanup(minor);
}
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d10ee0b5aac..8a2c5fd0893e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,7 +33,7 @@
#include <asm/sizes.h>
-#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
/* stubs we need for compile-test: */
static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
@@ -55,6 +55,9 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
+struct msm_rd_state;
+struct msm_perf_state;
+struct msm_gem_submit;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -82,6 +85,9 @@ struct msm_drm_private {
uint32_t next_fence, completed_fence;
wait_queue_head_t fence_event;
+ struct msm_rd_state *rd;
+ struct msm_perf_state *perf;
+
/* list of GEM objects: */
struct list_head inactive_list;
@@ -204,6 +210,15 @@ void __exit hdmi_unregister(void);
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+int msm_debugfs_late_init(struct drm_device *dev);
+int msm_rd_debugfs_init(struct drm_minor *minor);
+void msm_rd_debugfs_cleanup(struct drm_minor *minor);
+void msm_rd_dump_submit(struct msm_gem_submit *submit);
+int msm_perf_debugfs_init(struct drm_minor *minor);
+void msm_perf_debugfs_cleanup(struct drm_minor *minor);
+#else
+static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
#endif
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index a752ab83b810..5107fc4826bc 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- dma_addr_t paddr;
+ uint32_t paddr;
int ret, size;
sizes->surface_bpp = 32;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..690d7e7b6d1e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
int ret = 0;
if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
struct msm_mmu *mmu = priv->mmus[id];
struct page **pages = get_pages(obj);
+ if (!mmu) {
+ dev_err(dev->dev, "null MMU pointer\n");
+ return -EINVAL;
+ }
+
if (IS_ERR(pages))
return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 3246bb46c4f2..bfb052688f8e 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -90,6 +90,7 @@ struct msm_gem_submit {
uint32_t type;
uint32_t size; /* in dwords */
uint32_t iova;
+ uint32_t idx; /* cmdstream buffer idx in bos[] */
} cmd[MAX_CMDS];
struct {
uint32_t flags;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1f1f4cffdaed..cd0554f68316 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -402,6 +402,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].iova = iova + submit_cmd.submit_offset;
+ submit->cmd[i].idx = submit_cmd.submit_idx;
if (submit->valid)
continue;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3e667ca1f2b9..c6322197db8c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -320,6 +320,101 @@ static void hangcheck_handler(unsigned long data)
}
/*
+ * Performance Counters:
+ */
+
+/* called under perf_lock */
+static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
+{
+ uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
+ int i, n = min(ncntrs, gpu->num_perfcntrs);
+
+ /* read current values: */
+ for (i = 0; i < gpu->num_perfcntrs; i++)
+ current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
+
+ /* update cntrs: */
+ for (i = 0; i < n; i++)
+ cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
+
+ /* save current values: */
+ for (i = 0; i < gpu->num_perfcntrs; i++)
+ gpu->last_cntrs[i] = current_cntrs[i];
+
+ return n;
+}
+
+static void update_sw_cntrs(struct msm_gpu *gpu)
+{
+ ktime_t time;
+ uint32_t elapsed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+ if (!gpu->perfcntr_active)
+ goto out;
+
+ time = ktime_get();
+ elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
+
+ gpu->totaltime += elapsed;
+ if (gpu->last_sample.active)
+ gpu->activetime += elapsed;
+
+ gpu->last_sample.active = msm_gpu_active(gpu);
+ gpu->last_sample.time = time;
+
+out:
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+}
+
+void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+ /* we could dynamically enable/disable perfcntr registers too.. */
+ gpu->last_sample.active = msm_gpu_active(gpu);
+ gpu->last_sample.time = ktime_get();
+ gpu->activetime = gpu->totaltime = 0;
+ gpu->perfcntr_active = true;
+ update_hw_cntrs(gpu, 0, NULL);
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+}
+
+void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
+{
+ gpu->perfcntr_active = false;
+}
+
+/* returns -errno or # of cntrs sampled */
+int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
+ uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+
+ if (!gpu->perfcntr_active) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *activetime = gpu->activetime;
+ *totaltime = gpu->totaltime;
+
+ gpu->activetime = gpu->totaltime = 0;
+
+ ret = update_hw_cntrs(gpu, ncntrs, cntrs);
+
+out:
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+
+ return ret;
+}
+
+/*
* Cmdstream submission/retirement:
*/
@@ -361,6 +456,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
queue_work(priv->wq, &gpu->retire_work);
+ update_sw_cntrs(gpu);
}
/* add bo's to gpu's ring, and kick gpu: */
@@ -377,6 +473,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
inactive_cancel(gpu);
+ msm_rd_dump_submit(submit);
+
+ gpu->submitted_fence = submit->fence;
+
+ update_sw_cntrs(gpu);
+
ret = gpu->funcs->submit(gpu, submit, ctx);
priv->lastctx = ctx;
@@ -429,6 +531,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct iommu_domain *iommu;
int i, ret;
+ if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
+ gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
+
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
@@ -444,6 +549,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
+ spin_lock_init(&gpu->perf_lock);
+
BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
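update_hw_cntrs() computes each delta as current - last in 32-bit unsigned arithmetic, which stays correct across a single counter wrap thanks to modular arithmetic:

    uint32_t last = 0xfffffff0, current = 0x00000010;
    uint32_t delta = current - last;  /* 0x20: 32 ticks, despite the wrap */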
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index fad27008922f..9b579b792840 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -25,6 +25,7 @@
#include "msm_ringbuffer.h"
struct msm_gem_submit;
+struct msm_gpu_perfcntr;
/* So far, with hardware that I've seen to date, we can have:
* + zero, one, or two z180 2d cores
@@ -64,6 +65,18 @@ struct msm_gpu {
struct drm_device *dev;
const struct msm_gpu_funcs *funcs;
+ /* performance counters (hw & sw): */
+ spinlock_t perf_lock;
+ bool perfcntr_active;
+ struct {
+ bool active;
+ ktime_t time;
+ } last_sample;
+ uint32_t totaltime, activetime; /* sw counters */
+ uint32_t last_cntrs[5]; /* hw counters */
+ const struct msm_gpu_perfcntr *perfcntrs;
+ uint32_t num_perfcntrs;
+
struct msm_ringbuffer *rb;
uint32_t rb_iova;
@@ -113,6 +126,19 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
}
+/* Perf-Counters:
+ * The select_reg and select_val are just there for the benefit of the child
+ * class that actually enables the perf counter, but the msm_gpu base class
+ * will handle sampling/displaying the counters.
+ */
+
+struct msm_gpu_perfcntr {
+ uint32_t select_reg;
+ uint32_t sample_reg;
+ uint32_t select_val;
+ const char *name;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -126,6 +152,11 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
+void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
+int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
+ uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
+
void msm_gpu_retire(struct msm_gpu *gpu);
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
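A consumer of this counter API (msm_perf.c, added below in this diff, is the in-tree one) would sample along these lines — a sketch only, assuming a valid struct msm_gpu *gpu:

    uint32_t activetime, totaltime, cntrs[5];
    int n;

    msm_gpu_perfcntr_start(gpu);
    /* ... let the GPU run for a sample period ... */
    n = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
                                ARRAY_SIZE(cntrs), cntrs);
    if (n >= 0)
            pr_info("busy %u/%u us, %d hw counters\n", activetime, totaltime, n);
    msm_gpu_perfcntr_stop(gpu);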
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 92b745986231..4b2ad9181edf 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
+ return -ENOSYS;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
for (i = 0; i < cnt; i++) {
struct device *msm_iommu_get_ctx(const char *ctx_name);
struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx))
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev->dev, "couldn't get %s context", names[i]);
continue;
+ }
ret = iommu_attach_device(iommu->domain, ctx);
if (ret) {
dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
return 0;
}
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+ iommu_detach_device(iommu->domain, ctx);
+ }
+}
+
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+ BUG_ON(!PAGE_ALIGNED(bytes));
da += bytes;
}
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
static const struct msm_mmu_funcs funcs = {
.attach = msm_iommu_attach,
+ .detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 030324482b4a..21da6d154f71 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -22,6 +22,7 @@
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+ void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
new file mode 100644
index 000000000000..830857c47c86
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For profiling, userspace can:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/perf
+ *
+ * This will enable performance counters/profiling to track the busy time
+ * and any gpu specific performance counters that are supported.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+struct msm_perf_state {
+ struct drm_device *dev;
+
+ bool open;
+ int cnt;
+ struct mutex read_lock;
+
+ char buf[256];
+ int buftot, bufpos;
+
+ unsigned long next_jiffies;
+
+ struct dentry *ent;
+ struct drm_info_node *node;
+};
+
+#define SAMPLE_TIME (HZ/4)
+
+/* wait for next sample time: */
+static int wait_sample(struct msm_perf_state *perf)
+{
+ unsigned long start_jiffies = jiffies;
+
+ if (time_after(perf->next_jiffies, start_jiffies)) {
+ unsigned long remaining_jiffies =
+ perf->next_jiffies - start_jiffies;
+ int ret = schedule_timeout_interruptible(remaining_jiffies);
+ if (ret > 0) {
+ /* interrupted */
+ return -ERESTARTSYS;
+ }
+ }
+ perf->next_jiffies += SAMPLE_TIME;
+ return 0;
+}
+
+static int refill_buf(struct msm_perf_state *perf)
+{
+ struct msm_drm_private *priv = perf->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ char *ptr = perf->buf;
+ int rem = sizeof(perf->buf);
+ int i, n;
+
+ if ((perf->cnt++ % 32) == 0) {
+ /* Header line: */
+ n = snprintf(ptr, rem, "%%BUSY");
+ ptr += n;
+ rem -= n;
+
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ n = snprintf(ptr, rem, "\t%s", perfcntr->name);
+ ptr += n;
+ rem -= n;
+ }
+ } else {
+ /* Sample line: */
+ uint32_t activetime = 0, totaltime = 0;
+ uint32_t cntrs[5];
+ uint32_t val;
+ int ret;
+
+ /* sleep until next sample time: */
+ ret = wait_sample(perf);
+ if (ret)
+ return ret;
+
+ ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
+ ARRAY_SIZE(cntrs), cntrs);
+ if (ret < 0)
+ return ret;
+
+ val = totaltime ? 1000 * activetime / totaltime : 0;
+ n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
+ ptr += n;
+ rem -= n;
+
+ for (i = 0; i < ret; i++) {
+ /* cycle counters (I think).. convert to MHz.. */
+ val = cntrs[i] / 10000;
+ n = snprintf(ptr, rem, "\t%5d.%02d",
+ val / 100, val % 100);
+ ptr += n;
+ rem -= n;
+ }
+ }
+
+ n = snprintf(ptr, rem, "\n");
+ ptr += n;
+ rem -= n;
+
+ perf->bufpos = 0;
+ perf->buftot = ptr - perf->buf;
+
+ return 0;
+}
+
+static ssize_t perf_read(struct file *file, char __user *buf,
+ size_t sz, loff_t *ppos)
+{
+ struct msm_perf_state *perf = file->private_data;
+ int n = 0, ret;
+
+ mutex_lock(&perf->read_lock);
+
+ if (perf->bufpos >= perf->buftot) {
+ ret = refill_buf(perf);
+ if (ret)
+ goto out;
+ }
+
+ n = min((int)sz, perf->buftot - perf->bufpos);
+ ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
+ if (ret)
+ goto out;
+
+ perf->bufpos += n;
+ *ppos += n;
+
+out:
+ mutex_unlock(&perf->read_lock);
+ if (ret)
+ return ret;
+ return n;
+}
+
+static int perf_open(struct inode *inode, struct file *file)
+{
+ struct msm_perf_state *perf = inode->i_private;
+ struct drm_device *dev = perf->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (perf->open || !gpu) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ file->private_data = perf;
+ perf->open = true;
+ perf->cnt = 0;
+ perf->buftot = 0;
+ perf->bufpos = 0;
+ msm_gpu_perfcntr_start(gpu);
+ perf->next_jiffies = jiffies + SAMPLE_TIME;
+
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+ struct msm_perf_state *perf = inode->i_private;
+ struct msm_drm_private *priv = perf->dev->dev_private;
+ msm_gpu_perfcntr_stop(priv->gpu);
+ perf->open = false;
+ return 0;
+}
+
+
+static const struct file_operations perf_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = perf_open,
+ .read = perf_read,
+ .llseek = no_llseek,
+ .release = perf_release,
+};
+
+int msm_perf_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_perf_state *perf;
+
+ /* only create on first minor: */
+ if (priv->perf)
+ return 0;
+
+ perf = kzalloc(sizeof(*perf), GFP_KERNEL);
+ if (!perf)
+ return -ENOMEM;
+
+ perf->dev = minor->dev;
+
+ mutex_init(&perf->read_lock);
+ priv->perf = perf;
+
+ perf->node = kzalloc(sizeof(*perf->node), GFP_KERNEL);
+ if (!perf->node)
+ goto fail;
+
+ perf->ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
+ minor->debugfs_root, perf, &perf_debugfs_fops);
+ if (!perf->ent) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/perf\n",
+ minor->debugfs_root->d_name.name);
+ goto fail;
+ }
+
+ perf->node->minor = minor;
+ perf->node->dent = perf->ent;
+ perf->node->info_ent = NULL;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&perf->node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+
+fail:
+ msm_perf_debugfs_cleanup(minor);
+ return -1;
+}
+
+void msm_perf_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_perf_state *perf = priv->perf;
+
+ if (!perf)
+ return;
+
+ priv->perf = NULL;
+
+ debugfs_remove(perf->ent);
+
+ if (perf->node) {
+ mutex_lock(&minor->debugfs_lock);
+ list_del(&perf->node->list);
+ mutex_unlock(&minor->debugfs_lock);
+ kfree(perf->node);
+ }
+
+ mutex_destroy(&perf->read_lock);
+
+ kfree(perf);
+}
+
+#endif
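refill_buf() prints the busy figure in tenths of a percent: val = 1000 * activetime / totaltime. Worked through with illustrative numbers:

    uint32_t activetime = 2500, totaltime = 10000;  /* microseconds over one sample */
    uint32_t val = totaltime ? 1000 * activetime / totaltime : 0;  /* 250 */
    /* "%3d.%d%%" with val / 10 and val % 10 prints " 25.0%" */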
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
new file mode 100644
index 000000000000..9a78c48817c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For debugging crashes, userspace can:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
+ *
+ * To log the cmdstream in a format that is understood by the freedreno/cffdump
+ * utility. By comparing the last successfully completed fence # to the
+ * cmdstream for the next fence, you can narrow down which process and submit
+ * caused the gpu crash/lockup.
+ *
+ * This bypasses drm_debugfs_create_files() mainly because we need to use
+ * our own fops for a bit more control. In particular, we don't want to
+ * do anything if userspace doesn't have the debugfs file open.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <linux/wait.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+enum rd_sect_type {
+ RD_NONE,
+ RD_TEST, /* ascii text */
+ RD_CMD, /* ascii text */
+ RD_GPUADDR, /* u32 gpuaddr, u32 size */
+ RD_CONTEXT, /* raw dump */
+ RD_CMDSTREAM, /* raw dump */
+ RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
+ RD_PARAM, /* u32 param_type, u32 param_val, u32 bitlen */
+ RD_FLUSH, /* empty, clear previous params */
+ RD_PROGRAM, /* shader program, raw dump */
+ RD_VERT_SHADER,
+ RD_FRAG_SHADER,
+ RD_BUFFER_CONTENTS,
+ RD_GPU_ID,
+};
+
+#define BUF_SZ 512 /* should be power of 2 */
+
+/* space used: */
+#define circ_count(circ) \
+ (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
+#define circ_count_to_end(circ) \
+ (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
+/* space available: */
+#define circ_space(circ) \
+ (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
+#define circ_space_to_end(circ) \
+ (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
+
+struct msm_rd_state {
+ struct drm_device *dev;
+
+ bool open;
+
+ struct dentry *ent;
+ struct drm_info_node *node;
+
+ /* current submit to read out: */
+ struct msm_gem_submit *submit;
+
+ /* fifo access is synchronized on the producer side by
+ * struct_mutex held by submit code (otherwise we could
+ * end up w/ cmds logged in different order than they
+ * were executed). And read_lock synchronizes the reads
+ */
+ struct mutex read_lock;
+
+ wait_queue_head_t fifo_event;
+ struct circ_buf fifo;
+
+ char buf[BUF_SZ];
+};
+
+static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
+{
+ struct circ_buf *fifo = &rd->fifo;
+ const char *ptr = buf;
+
+ while (sz > 0) {
+ char *fptr = &fifo->buf[fifo->head];
+ int n;
+
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+
+ n = min(sz, circ_space_to_end(&rd->fifo));
+ memcpy(fptr, ptr, n);
+
+ fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+ sz -= n;
+ ptr += n;
+
+ wake_up_all(&rd->fifo_event);
+ }
+}
+
+static void rd_write_section(struct msm_rd_state *rd,
+ enum rd_sect_type type, const void *buf, int sz)
+{
+ rd_write(rd, &type, 4);
+ rd_write(rd, &sz, 4);
+ rd_write(rd, buf, sz);
+}
+
+static ssize_t rd_read(struct file *file, char __user *buf,
+ size_t sz, loff_t *ppos)
+{
+ struct msm_rd_state *rd = file->private_data;
+ struct circ_buf *fifo = &rd->fifo;
+ const char *fptr = &fifo->buf[fifo->tail];
+ int n = 0, ret = 0;
+
+ mutex_lock(&rd->read_lock);
+
+ ret = wait_event_interruptible(rd->fifo_event,
+ circ_count(&rd->fifo) > 0);
+ if (ret)
+ goto out;
+
+ n = min_t(int, sz, circ_count_to_end(&rd->fifo));
+ ret = copy_to_user(buf, fptr, n);
+ if (ret)
+ goto out;
+
+ fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+ *ppos += n;
+
+ wake_up_all(&rd->fifo_event);
+
+out:
+ mutex_unlock(&rd->read_lock);
+ if (ret)
+ return ret;
+ return n;
+}
+
+static int rd_open(struct inode *inode, struct file *file)
+{
+ struct msm_rd_state *rd = inode->i_private;
+ struct drm_device *dev = rd->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ uint64_t val;
+ uint32_t gpu_id;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (rd->open || !gpu) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ file->private_data = rd;
+ rd->open = true;
+
+ /* the parsing tools need to know gpu-id to know which
+ * register database to load.
+ */
+ gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+ gpu_id = val;
+
+ rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
+
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int rd_release(struct inode *inode, struct file *file)
+{
+ struct msm_rd_state *rd = inode->i_private;
+ rd->open = false;
+ return 0;
+}
+
+
+static const struct file_operations rd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rd_open,
+ .read = rd_read,
+ .llseek = no_llseek,
+ .release = rd_release,
+};
+
+int msm_rd_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd;
+
+ /* only create on first minor: */
+ if (priv->rd)
+ return 0;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->dev = minor->dev;
+ rd->fifo.buf = rd->buf;
+
+ mutex_init(&rd->read_lock);
+ priv->rd = rd;
+
+ init_waitqueue_head(&rd->fifo_event);
+
+ rd->node = kzalloc(sizeof(*rd->node), GFP_KERNEL);
+ if (!rd->node)
+ goto fail;
+
+ rd->ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
+ minor->debugfs_root, rd, &rd_debugfs_fops);
+ if (!rd->ent) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/rd\n",
+ minor->debugfs_root->d_name.name);
+ goto fail;
+ }
+
+ rd->node->minor = minor;
+ rd->node->dent = rd->ent;
+ rd->node->info_ent = NULL;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&rd->node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+
+fail:
+ msm_rd_debugfs_cleanup(minor);
+	return -ENOMEM;
+}
+
+void msm_rd_debugfs_cleanup(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd = priv->rd;
+
+ if (!rd)
+ return;
+
+ priv->rd = NULL;
+
+ debugfs_remove(rd->ent);
+
+ if (rd->node) {
+ mutex_lock(&minor->debugfs_lock);
+ list_del(&rd->node->list);
+ mutex_unlock(&minor->debugfs_lock);
+ kfree(rd->node);
+ }
+
+ mutex_destroy(&rd->read_lock);
+
+ kfree(rd);
+}
+
+/* called under struct_mutex */
+void msm_rd_dump_submit(struct msm_gem_submit *submit)
+{
+ struct drm_device *dev = submit->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_rd_state *rd = priv->rd;
+ char msg[128];
+ int i, n;
+
+	if (!rd || !rd->open)
+ return;
+
+ /* writing into fifo is serialized by caller, and
+ * rd->read_lock is used to serialize the reads
+ */
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ TASK_COMM_LEN, current->comm, task_pid_nr(current),
+ submit->fence);
+
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+
+	/* It could be nice to have an option (module-param?) to snapshot
+	 * all the bo's associated with the submit. Handy to see vtx
+	 * buffers, etc. For now just the cmdstream bo's are enough.
+	 */
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint32_t idx = submit->cmd[i].idx;
+ uint32_t iova = submit->cmd[i].iova;
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ const char *buf = msm_gem_vaddr_locked(&obj->base);
+
+ buf += iova - submit->bos[idx].iova;
+
+ rd_write_section(rd, RD_GPUADDR,
+ (uint32_t[2]){ iova, szd * 4 }, 8);
+ rd_write_section(rd, RD_BUFFER_CONTENTS,
+ buf, szd * 4);
+
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets, we've logged the buffer, the
+ * parser tool will follow the IB based on the logged
+ * buffer/gpuaddr, so nothing more to do.
+ */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ case MSM_SUBMIT_CMD_BUF:
+ rd_write_section(rd, RD_CMDSTREAM_ADDR,
+ (uint32_t[2]){ iova, szd }, 8);
+ break;
+ }
+ }
+}
+#endif
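
The rd node emits a simple self-describing stream: each section written by
rd_write_section() is a 4-byte type, a 4-byte payload size, then the payload
itself, so userspace can skip sections it does not understand. Below is a
minimal sketch of a reader, assuming only that framing; the debugfs path and
the output format are illustrative, not part of the driver.

/* rd_dump.c - sketch of a reader for the rd stream (type, size, payload).
 * Only the 4+4+payload framing is taken from the driver; the path and
 * buffer size are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/rd", "rb");
	uint32_t type, sz;
	char tmp[256];

	if (!f)
		return 1;
	while (fread(&type, 4, 1, f) == 1 && fread(&sz, 4, 1, f) == 1) {
		printf("section type=%u size=%u\n", type, sz);
		while (sz) {	/* the node is not seekable (no_llseek), so consume the payload */
			uint32_t n = sz < sizeof(tmp) ? sz : sizeof(tmp);
			if (fread(tmp, 1, n, f) != n)
				goto done;
			sz -= n;
		}
	}
done:
	fclose(f);
	return 0;
}
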
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index b7d216264775..8b307e143632 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -102,6 +102,7 @@ nouveau-y += core/subdev/fb/nvaa.o
nouveau-y += core/subdev/fb/nvaf.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/fb/nve0.o
+nouveau-y += core/subdev/fb/gk20a.o
nouveau-y += core/subdev/fb/gm107.o
nouveau-y += core/subdev/fb/ramnv04.o
nouveau-y += core/subdev/fb/ramnv10.o
@@ -117,25 +118,33 @@ nouveau-y += core/subdev/fb/ramnva3.o
nouveau-y += core/subdev/fb/ramnvaa.o
nouveau-y += core/subdev/fb/ramnvc0.o
nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/ramgk20a.o
nouveau-y += core/subdev/fb/ramgm107.o
nouveau-y += core/subdev/fb/sddr3.o
nouveau-y += core/subdev/fb/gddr5.o
nouveau-y += core/subdev/gpio/base.o
nouveau-y += core/subdev/gpio/nv10.o
nouveau-y += core/subdev/gpio/nv50.o
+nouveau-y += core/subdev/gpio/nv92.o
nouveau-y += core/subdev/gpio/nvd0.o
nouveau-y += core/subdev/gpio/nve0.o
nouveau-y += core/subdev/i2c/base.o
nouveau-y += core/subdev/i2c/anx9805.o
nouveau-y += core/subdev/i2c/aux.o
nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/pad.o
+nouveau-y += core/subdev/i2c/padnv04.o
+nouveau-y += core/subdev/i2c/padnv94.o
nouveau-y += core/subdev/i2c/nv04.o
nouveau-y += core/subdev/i2c/nv4e.o
nouveau-y += core/subdev/i2c/nv50.o
nouveau-y += core/subdev/i2c/nv94.o
nouveau-y += core/subdev/i2c/nvd0.o
+nouveau-y += core/subdev/i2c/gf117.o
+nouveau-y += core/subdev/i2c/nve0.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
+nouveau-y += core/subdev/ibus/gk20a.o
nouveau-y += core/subdev/instmem/base.o
nouveau-y += core/subdev/instmem/nv04.o
nouveau-y += core/subdev/instmem/nv40.o
@@ -214,6 +223,9 @@ nouveau-y += core/engine/device/nvc0.o
nouveau-y += core/engine/device/nve0.o
nouveau-y += core/engine/device/gm100.o
nouveau-y += core/engine/disp/base.o
+nouveau-y += core/engine/disp/conn.o
+nouveau-y += core/engine/disp/outp.o
+nouveau-y += core/engine/disp/outpdp.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
nouveau-y += core/engine/disp/nv84.o
@@ -245,6 +257,7 @@ nouveau-y += core/engine/fifo/nv50.o
nouveau-y += core/engine/fifo/nv84.o
nouveau-y += core/engine/fifo/nvc0.o
nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/fifo/gk20a.o
nouveau-y += core/engine/fifo/nv108.o
nouveau-y += core/engine/graph/ctxnv40.o
nouveau-y += core/engine/graph/ctxnv50.o
@@ -255,6 +268,7 @@ nouveau-y += core/engine/graph/ctxnvc8.o
nouveau-y += core/engine/graph/ctxnvd7.o
nouveau-y += core/engine/graph/ctxnvd9.o
nouveau-y += core/engine/graph/ctxnve4.o
+nouveau-y += core/engine/graph/ctxgk20a.o
nouveau-y += core/engine/graph/ctxnvf0.o
nouveau-y += core/engine/graph/ctxnv108.o
nouveau-y += core/engine/graph/ctxgm107.o
@@ -275,6 +289,7 @@ nouveau-y += core/engine/graph/nvc8.o
nouveau-y += core/engine/graph/nvd7.o
nouveau-y += core/engine/graph/nvd9.o
nouveau-y += core/engine/graph/nve4.o
+nouveau-y += core/engine/graph/gk20a.o
nouveau-y += core/engine/graph/nvf0.o
nouveau-y += core/engine/graph/nv108.o
nouveau-y += core/engine/graph/gm107.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 3f3c76581a9e..ae81d3b5d8b7 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -28,14 +28,20 @@ nouveau_event_put(struct nouveau_eventh *handler)
{
struct nouveau_event *event = handler->event;
unsigned long flags;
- if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
- spin_lock_irqsave(&event->refs_lock, flags);
- if (!--event->index[handler->index].refs) {
+ u32 m, t;
+
+ if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
+ return;
+
+ spin_lock_irqsave(&event->refs_lock, flags);
+ for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+ if (!--event->refs[handler->index * event->types_nr + t]) {
if (event->disable)
- event->disable(event, handler->index);
+ event->disable(event, 1 << t, handler->index);
}
- spin_unlock_irqrestore(&event->refs_lock, flags);
+
}
+ spin_unlock_irqrestore(&event->refs_lock, flags);
}
void
@@ -43,14 +49,20 @@ nouveau_event_get(struct nouveau_eventh *handler)
{
struct nouveau_event *event = handler->event;
unsigned long flags;
- if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
- spin_lock_irqsave(&event->refs_lock, flags);
- if (!event->index[handler->index].refs++) {
+ u32 m, t;
+
+ if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
+ return;
+
+ spin_lock_irqsave(&event->refs_lock, flags);
+ for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+ if (!event->refs[handler->index * event->types_nr + t]++) {
if (event->enable)
- event->enable(event, handler->index);
+ event->enable(event, 1 << t, handler->index);
}
- spin_unlock_irqrestore(&event->refs_lock, flags);
+
}
+ spin_unlock_irqrestore(&event->refs_lock, flags);
}
static void
@@ -65,38 +77,47 @@ nouveau_event_fini(struct nouveau_eventh *handler)
}
static int
-nouveau_event_init(struct nouveau_event *event, int index,
- int (*func)(void *, int), void *priv,
+nouveau_event_init(struct nouveau_event *event, u32 types, int index,
+ int (*func)(void *, u32, int), void *priv,
struct nouveau_eventh *handler)
{
unsigned long flags;
+ if (types & ~((1 << event->types_nr) - 1))
+ return -EINVAL;
if (index >= event->index_nr)
return -EINVAL;
handler->event = event;
handler->flags = 0;
+ handler->types = types;
handler->index = index;
handler->func = func;
handler->priv = priv;
spin_lock_irqsave(&event->list_lock, flags);
- list_add_tail(&handler->head, &event->index[index].list);
+ list_add_tail(&handler->head, &event->list[index]);
spin_unlock_irqrestore(&event->list_lock, flags);
return 0;
}
int
-nouveau_event_new(struct nouveau_event *event, int index,
- int (*func)(void *, int), void *priv,
+nouveau_event_new(struct nouveau_event *event, u32 types, int index,
+ int (*func)(void *, u32, int), void *priv,
struct nouveau_eventh **phandler)
{
struct nouveau_eventh *handler;
int ret = -ENOMEM;
+ if (event->check) {
+ ret = event->check(event, types, index);
+ if (ret)
+ return ret;
+ }
+
handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
if (handler) {
- ret = nouveau_event_init(event, index, func, priv, handler);
+ ret = nouveau_event_init(event, types, index, func, priv, handler);
if (ret)
kfree(handler);
}
@@ -116,7 +137,7 @@ nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
}
void
-nouveau_event_trigger(struct nouveau_event *event, int index)
+nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
{
struct nouveau_eventh *handler;
unsigned long flags;
@@ -125,10 +146,15 @@ nouveau_event_trigger(struct nouveau_event *event, int index)
return;
spin_lock_irqsave(&event->list_lock, flags);
- list_for_each_entry(handler, &event->index[index].list, head) {
- if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
- handler->func(handler->priv, index) == NVKM_EVENT_DROP)
- nouveau_event_put(handler);
+ list_for_each_entry(handler, &event->list[index], head) {
+ if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
+ continue;
+ if (!(handler->types & types))
+ continue;
+ if (handler->func(handler->priv, handler->types & types, index)
+ != NVKM_EVENT_DROP)
+ continue;
+ nouveau_event_put(handler);
}
spin_unlock_irqrestore(&event->list_lock, flags);
}
@@ -144,20 +170,27 @@ nouveau_event_destroy(struct nouveau_event **pevent)
}
int
-nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
{
struct nouveau_event *event;
int i;
- event = *pevent = kzalloc(sizeof(*event) + index_nr *
- sizeof(event->index[0]), GFP_KERNEL);
+ event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
+ sizeof(event->refs[0]), GFP_KERNEL);
if (!event)
return -ENOMEM;
+ event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
+ if (!event->list) {
+ kfree(event);
+ return -ENOMEM;
+ }
+
spin_lock_init(&event->list_lock);
spin_lock_init(&event->refs_lock);
for (i = 0; i < index_nr; i++)
- INIT_LIST_HEAD(&event->index[i].list);
+ INIT_LIST_HEAD(&event->list[i]);
+ event->types_nr = types_nr;
event->index_nr = index_nr;
return 0;
}
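
The reworked event core keeps one reference count per (index, type) pair in a
flat array, indexed as index * event->types_nr + type, and each handler
carries a bitmask of the types it cares about. The loop condition
"t = __ffs(m), m" uses the comma operator so each set bit in the mask is
visited exactly once. A standalone sketch of the same iteration, with
my_ffs() standing in for the kernel's __ffs() (names here are illustrative):

/* Sketch: visit each set bit of a type mask once, as
 * nouveau_event_get()/put() do. Written as a plain while-loop, which is
 * equivalent to the driver's comma-operator for-loop but avoids calling
 * ffs() on a zero mask.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned my_ffs(uint32_t m)	/* index of lowest set bit, m != 0 */
{
	unsigned t = 0;
	while (!(m & 1)) {
		m >>= 1;
		t++;
	}
	return t;
}

int main(void)
{
	uint32_t m = 0x5;		/* e.g. two event types: bits 0 and 2 */
	int types_nr = 3, index = 1;

	while (m) {
		unsigned t = my_ffs(m);
		printf("type bit %u -> refs slot %u\n",
		       t, index * types_nr + t);
		m &= ~(1u << t);
	}
	return 0;
}
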
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 7f48e288215f..124538555904 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -156,7 +156,7 @@ nouveau_object_ctor(struct nouveau_object *parent,
}
if (ret == 0) {
- nv_debug(object, "created\n");
+ nv_trace(object, "created\n");
atomic_set(&object->refcount, 1);
}
@@ -166,7 +166,7 @@ nouveau_object_ctor(struct nouveau_object *parent,
static void
nouveau_object_dtor(struct nouveau_object *object)
{
- nv_debug(object, "destroying\n");
+ nv_trace(object, "destroying\n");
nv_ofuncs(object)->dtor(object);
}
@@ -337,7 +337,7 @@ nouveau_object_inc(struct nouveau_object *object)
goto fail_self;
}
- nv_debug(object, "initialised\n");
+ nv_trace(object, "initialised\n");
return 0;
fail_self:
@@ -375,7 +375,7 @@ nouveau_object_decf(struct nouveau_object *object)
if (object->parent)
nouveau_object_dec(object->parent, false);
- nv_debug(object, "stopped\n");
+ nv_trace(object, "stopped\n");
return 0;
}
@@ -411,7 +411,7 @@ nouveau_object_decs(struct nouveau_object *object)
}
}
- nv_debug(object, "suspended\n");
+ nv_trace(object, "suspended\n");
return 0;
fail_parent:
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index d258c21c4a22..a520029e25d9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -60,8 +60,8 @@ gm100_identify(struct nouveau_device *device)
case 0x117:
device->cname = "GM107";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
#if 0
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 0a51ff4e9e00..40b29d0214cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -47,7 +47,7 @@ nv04_identify(struct nouveau_device *device)
case 0x04:
device->cname = "NV04";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -65,7 +65,7 @@ nv04_identify(struct nouveau_device *device)
case 0x05:
device->cname = "NV05";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index e008de8b51b0..5f7c25ff523d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -48,8 +48,8 @@ nv10_identify(struct nouveau_device *device)
case 0x10:
device->cname = "NV10";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -65,8 +65,8 @@ nv10_identify(struct nouveau_device *device)
case 0x15:
device->cname = "NV15";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -84,8 +84,8 @@ nv10_identify(struct nouveau_device *device)
case 0x16:
device->cname = "NV16";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -103,8 +103,8 @@ nv10_identify(struct nouveau_device *device)
case 0x1a:
device->cname = "nForce";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -122,8 +122,8 @@ nv10_identify(struct nouveau_device *device)
case 0x11:
device->cname = "NV11";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -141,8 +141,8 @@ nv10_identify(struct nouveau_device *device)
case 0x17:
device->cname = "NV17";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -160,8 +160,8 @@ nv10_identify(struct nouveau_device *device)
case 0x1f:
device->cname = "nForce2";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -179,8 +179,8 @@ nv10_identify(struct nouveau_device *device)
case 0x18:
device->cname = "NV18";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 7b629a3aed05..75fed11bba0a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -49,8 +49,8 @@ nv20_identify(struct nouveau_device *device)
case 0x20:
device->cname = "NV20";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -68,8 +68,8 @@ nv20_identify(struct nouveau_device *device)
case 0x25:
device->cname = "NV25";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -87,8 +87,8 @@ nv20_identify(struct nouveau_device *device)
case 0x28:
device->cname = "NV28";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -106,8 +106,8 @@ nv20_identify(struct nouveau_device *device)
case 0x2a:
device->cname = "NV2A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 7dfddd5a1908..36919d7db7cc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -49,8 +49,8 @@ nv30_identify(struct nouveau_device *device)
case 0x30:
device->cname = "NV30";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -68,8 +68,8 @@ nv30_identify(struct nouveau_device *device)
case 0x35:
device->cname = "NV35";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -87,8 +87,8 @@ nv30_identify(struct nouveau_device *device)
case 0x31:
device->cname = "NV31";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -107,8 +107,8 @@ nv30_identify(struct nouveau_device *device)
case 0x36:
device->cname = "NV36";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
@@ -127,8 +127,8 @@ nv30_identify(struct nouveau_device *device)
case 0x34:
device->cname = "NV34";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 7c1ce6cf4f1f..1130a62be2c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -53,8 +53,8 @@ nv40_identify(struct nouveau_device *device)
case 0x40:
device->cname = "NV40";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -76,8 +76,8 @@ nv40_identify(struct nouveau_device *device)
case 0x41:
device->cname = "NV41";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -99,8 +99,8 @@ nv40_identify(struct nouveau_device *device)
case 0x42:
device->cname = "NV42";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -122,8 +122,8 @@ nv40_identify(struct nouveau_device *device)
case 0x43:
device->cname = "NV43";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -145,8 +145,8 @@ nv40_identify(struct nouveau_device *device)
case 0x45:
device->cname = "NV45";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -168,8 +168,8 @@ nv40_identify(struct nouveau_device *device)
case 0x47:
device->cname = "G70";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -191,8 +191,8 @@ nv40_identify(struct nouveau_device *device)
case 0x49:
device->cname = "G71";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -214,8 +214,8 @@ nv40_identify(struct nouveau_device *device)
case 0x4b:
device->cname = "G73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -237,8 +237,8 @@ nv40_identify(struct nouveau_device *device)
case 0x44:
device->cname = "NV44";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -260,8 +260,8 @@ nv40_identify(struct nouveau_device *device)
case 0x46:
device->cname = "G72";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -283,8 +283,8 @@ nv40_identify(struct nouveau_device *device)
case 0x4a:
device->cname = "NV44A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -306,8 +306,8 @@ nv40_identify(struct nouveau_device *device)
case 0x4c:
device->cname = "C61";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -329,8 +329,8 @@ nv40_identify(struct nouveau_device *device)
case 0x4e:
device->cname = "C51";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv4e_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -352,8 +352,8 @@ nv40_identify(struct nouveau_device *device)
case 0x63:
device->cname = "C73";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -375,8 +375,8 @@ nv40_identify(struct nouveau_device *device)
case 0x67:
device->cname = "C67";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
@@ -398,8 +398,8 @@ nv40_identify(struct nouveau_device *device)
case 0x68:
device->cname = "C68";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index 66499fa0f758..ef0b0bde1a91 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -60,8 +60,8 @@ nv50_identify(struct nouveau_device *device)
case 0x50:
device->cname = "G80";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -85,8 +85,8 @@ nv50_identify(struct nouveau_device *device)
case 0x84:
device->cname = "G84";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -113,8 +113,8 @@ nv50_identify(struct nouveau_device *device)
case 0x86:
device->cname = "G86";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -141,8 +141,8 @@ nv50_identify(struct nouveau_device *device)
case 0x92:
device->cname = "G92";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -169,8 +169,8 @@ nv50_identify(struct nouveau_device *device)
case 0x94:
device->cname = "G94";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -197,8 +197,8 @@ nv50_identify(struct nouveau_device *device)
case 0x96:
device->cname = "G96";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -225,8 +225,8 @@ nv50_identify(struct nouveau_device *device)
case 0x98:
device->cname = "G98";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -253,8 +253,8 @@ nv50_identify(struct nouveau_device *device)
case 0xa0:
device->cname = "G200";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -281,8 +281,8 @@ nv50_identify(struct nouveau_device *device)
case 0xaa:
device->cname = "MCP77/MCP78";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -309,8 +309,8 @@ nv50_identify(struct nouveau_device *device)
case 0xac:
device->cname = "MCP79/MCP7A";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -337,8 +337,8 @@ nv50_identify(struct nouveau_device *device)
case 0xa3:
device->cname = "GT215";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -367,8 +367,8 @@ nv50_identify(struct nouveau_device *device)
case 0xa5:
device->cname = "GT216";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -396,8 +396,8 @@ nv50_identify(struct nouveau_device *device)
case 0xa8:
device->cname = "GT218";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -425,8 +425,8 @@ nv50_identify(struct nouveau_device *device)
case 0xaf:
device->cname = "MCP89";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 2075b3027052..8d55ed633b19 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -60,8 +60,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xc0:
device->cname = "GF100";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -92,8 +92,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xc4:
device->cname = "GF104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -124,8 +124,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xc3:
device->cname = "GF106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -155,8 +155,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xce:
device->cname = "GF114";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -187,8 +187,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xcf:
device->cname = "GF116";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -219,8 +219,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xc1:
device->cname = "GF108";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -250,8 +250,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xc8:
device->cname = "GF110";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -282,8 +282,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xd9:
device->cname = "GF119";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -313,8 +313,8 @@ nvc0_identify(struct nouveau_device *device)
case 0xd7:
device->cname = "GF117";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 9784cbf8a9d2..2d1e97d4264f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -60,8 +60,8 @@ nve0_identify(struct nouveau_device *device)
case 0xe4:
device->cname = "GK104";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -93,8 +93,8 @@ nve0_identify(struct nouveau_device *device)
case 0xe7:
device->cname = "GK107";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -126,8 +126,8 @@ nve0_identify(struct nouveau_device *device)
case 0xe6:
device->cname = "GK106";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -156,11 +156,61 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
break;
+ case 0xea:
+ device->cname = "GK20A";
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ break;
case 0xf0:
device->cname = "GK110";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+ break;
+ case 0xf1:
+ device->cname = "GK110B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -184,18 +234,16 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
-#endif
device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
break;
case 0x108:
device->cname = "GK208";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
@@ -219,11 +267,9 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
-#endif
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
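
Across all the nvXX_identify() tables above, the GPIO and I2C entries drop
the '&': those oclasses are now exported from their subdevs as pointers
rather than as plain structs, so the tables assign them directly, while
entries like the clock oclass keep the address-of form. A declaration-level
sketch of the difference (the exact header layout shown is an assumption,
and the snippet is not meant to link on its own):

/* before: the table stored the address of a static struct */
extern struct nouveau_oclass  nv04_clock_oclass;	/* still '&' in tables */

/* after: the subdev exports a ready-made pointer */
extern struct nouveau_oclass *nv04_i2c_oclass;
extern struct nouveau_oclass *nv10_gpio_oclass;

/* so a table entry becomes a direct assignment: */
static void fill_example(struct nouveau_oclass **slot)
{
	*slot = nv04_i2c_oclass;	/* no '&' any more */
}
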
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 7a5cae42834f..9c38c5e40500 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -22,13 +22,89 @@
* Authors: Ben Skeggs
*/
-#include <engine/disp.h>
+#include "priv.h"
+#include "outp.h"
+#include "conn.h"
+
+static int
+nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index)
+{
+ struct nouveau_disp *disp = event->priv;
+ struct nvkm_output *outp;
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (outp->conn->index == index) {
+ if (outp->conn->hpd.event)
+ return 0;
+ break;
+ }
+ }
+ return -ENOSYS;
+}
+
+int
+_nouveau_disp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_disp *disp = (void *)object;
+ struct nvkm_output *outp;
+ int ret;
+
+ list_for_each_entry(outp, &disp->outp, head) {
+ ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
+ if (ret && suspend)
+ goto fail_outp;
+ }
+
+ return nouveau_engine_fini(&disp->base, suspend);
+
+fail_outp:
+ list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
+ nv_ofuncs(outp)->init(nv_object(outp));
+ }
+
+ return ret;
+}
+
+int
+_nouveau_disp_init(struct nouveau_object *object)
+{
+ struct nouveau_disp *disp = (void *)object;
+ struct nvkm_output *outp;
+ int ret;
+
+ ret = nouveau_engine_init(&disp->base);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(outp, &disp->outp, head) {
+ ret = nv_ofuncs(outp)->init(nv_object(outp));
+ if (ret)
+ goto fail_outp;
+ }
+
+ return ret;
+
+fail_outp:
+ list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
+ nv_ofuncs(outp)->fini(nv_object(outp), false);
+ }
+
+ return ret;
+}
void
_nouveau_disp_dtor(struct nouveau_object *object)
{
struct nouveau_disp *disp = (void *)object;
+ struct nvkm_output *outp, *outt;
+
nouveau_event_destroy(&disp->vblank);
+
+ if (disp->outp.next) {
+ list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+ }
+ }
+
nouveau_engine_destroy(&disp->base);
}
@@ -39,8 +115,15 @@ nouveau_disp_create_(struct nouveau_object *parent,
const char *intname, const char *extname,
int length, void **pobject)
{
+ struct nouveau_disp_impl *impl = (void *)oclass;
+ struct nouveau_bios *bios = nouveau_bios(parent);
struct nouveau_disp *disp;
- int ret;
+ struct nouveau_oclass **sclass;
+ struct nouveau_object *object;
+ struct dcb_output dcbE;
+ u8 hpd = 0, ver, hdr;
+ u32 data;
+ int ret, i;
ret = nouveau_engine_create_(parent, engine, oclass, true,
intname, extname, length, pobject);
@@ -48,5 +131,42 @@ nouveau_disp_create_(struct nouveau_object *parent,
if (ret)
return ret;
- return nouveau_event_create(heads, &disp->vblank);
+ INIT_LIST_HEAD(&disp->outp);
+
+ /* create output objects for each display path in the vbios */
+ i = -1;
+ while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (dcbE.type == DCB_OUTPUT_UNUSED)
+ continue;
+ if (dcbE.type == DCB_OUTPUT_EOL)
+ break;
+ data = dcbE.location << 4 | dcbE.type;
+
+ oclass = nvkm_output_oclass;
+ sclass = impl->outp;
+ while (sclass && sclass[0]) {
+ if (sclass[0]->handle == data) {
+ oclass = sclass[0];
+ break;
+ }
+ sclass++;
+ }
+
+ nouveau_object_ctor(*pobject, *pobject, oclass,
+ &dcbE, i, &object);
+ hpd = max(hpd, (u8)(dcbE.connector + 1));
+ }
+
+ ret = nouveau_event_create(3, hpd, &disp->hpd);
+ if (ret)
+ return ret;
+
+ disp->hpd->priv = disp;
+ disp->hpd->check = nouveau_disp_hpd_check;
+
+ ret = nouveau_event_create(1, heads, &disp->vblank);
+ if (ret)
+ return ret;
+
+ return 0;
}
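The new _nouveau_disp_init()/_nouveau_disp_fini() above walk the per-disp output list and, when one entry fails partway, unwind only the entries already processed by iterating back from the failing element. A minimal sketch of that rollback idiom, using the same <linux/list.h> iterators the patch relies on; the item struct and its callbacks are hypothetical stand-ins for the nv_ofuncs init/fini hooks:

    #include <linux/list.h>

    struct item {
            struct list_head head;
            int (*start)(struct item *);
            void (*stop)(struct item *);
    };

    static int
    start_all(struct list_head *items)
    {
            struct item *it;
            int ret;

            list_for_each_entry(it, items, head) {
                    ret = it->start(it);
                    if (ret)
                            goto fail;
            }
            return 0;

    fail:
            /* _continue_reverse resumes from the element *before* the
             * failing one, so only started entries are stopped */
            list_for_each_entry_continue_reverse(it, items, head)
                    it->stop(it);
            return ret;
    }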
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
new file mode 100644
index 000000000000..4ffbc70ecf5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include "conn.h"
+#include "outp.h"
+
+static void
+nvkm_connector_hpd_work(struct work_struct *w)
+{
+ struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work);
+ struct nouveau_disp *disp = nouveau_disp(conn);
+ struct nouveau_gpio *gpio = nouveau_gpio(conn);
+ u32 send = NVKM_HPD_UNPLUG;
+ if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index))
+ send = NVKM_HPD_PLUG;
+ nouveau_event_trigger(disp->hpd, send, conn->index);
+ nouveau_event_get(conn->hpd.event);
+}
+
+static int
+nvkm_connector_hpd(void *data, u32 type, int index)
+{
+ struct nvkm_connector *conn = data;
+ DBG("HPD: %d\n", type);
+ schedule_work(&conn->hpd.work);
+ return NVKM_EVENT_DROP;
+}
+
+int
+_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvkm_connector *conn = (void *)object;
+ if (conn->hpd.event)
+ nouveau_event_put(conn->hpd.event);
+ return nouveau_object_fini(&conn->base, suspend);
+}
+
+int
+_nvkm_connector_init(struct nouveau_object *object)
+{
+ struct nvkm_connector *conn = (void *)object;
+ int ret = nouveau_object_init(&conn->base);
+ if (ret == 0) {
+ if (conn->hpd.event)
+ nouveau_event_get(conn->hpd.event);
+ }
+ return ret;
+}
+
+void
+_nvkm_connector_dtor(struct nouveau_object *object)
+{
+ struct nvkm_connector *conn = (void *)object;
+ nouveau_event_ref(NULL, &conn->hpd.event);
+ nouveau_object_destroy(&conn->base);
+}
+
+int
+nvkm_connector_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct nvbios_connE *info, int index,
+ int length, void **pobject)
+{
+ static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
+ struct nouveau_gpio *gpio = nouveau_gpio(parent);
+ struct nouveau_disp *disp = (void *)engine;
+ struct nvkm_connector *conn;
+ struct nvkm_output *outp;
+ struct dcb_gpio_func func;
+ int ret;
+
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (outp->conn && outp->conn->index == index) {
+ atomic_inc(&nv_object(outp->conn)->refcount);
+ *pobject = outp->conn;
+ return 1;
+ }
+ }
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ conn = *pobject;
+ if (ret)
+ return ret;
+
+ conn->info = *info;
+ conn->index = index;
+
+ DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
+ info->type, info->location, info->hpd, info->dp,
+ info->di, info->sr, info->lcdid);
+
+ if ((info->hpd = ffs(info->hpd))) {
+ if (--info->hpd >= ARRAY_SIZE(hpd)) {
+ ERR("hpd %02x unknown\n", info->hpd);
+ goto done;
+ }
+ info->hpd = hpd[info->hpd];
+
+ ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
+ if (ret) {
+ ERR("func %02x lookup failed, %d\n", info->hpd, ret);
+ goto done;
+ }
+
+ ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED,
+ func.line, nvkm_connector_hpd,
+ conn, &conn->hpd.event);
+ if (ret) {
+ ERR("func %02x failed, %d\n", info->hpd, ret);
+ } else {
+ DBG("func %02x (HPD)\n", info->hpd);
+ }
+ }
+
+done:
+ INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work);
+ return 0;
+}
+
+int
+_nvkm_connector_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *info, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nvkm_connector *conn;
+ int ret;
+
+ ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
+ *pobject = nv_object(conn);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass *
+nvkm_connector_oclass = &(struct nvkm_connector_impl) {
+ .base = {
+ .handle = 0,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_connector_ctor,
+ .dtor = _nvkm_connector_dtor,
+ .init = _nvkm_connector_init,
+ .fini = _nvkm_connector_fini,
+ },
+ },
+}.base;
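nvkm_connector_hpd() above runs from the GPIO event path, so it only schedules work and returns NVKM_EVENT_DROP; the work item then samples the line, raises NVKM_HPD_PLUG or NVKM_HPD_UNPLUG, and re-arms the event. A sketch of that interrupt/work split, assuming the line can only be sampled in process context; sample_gpio() and report() are hypothetical stand-ins for the gpio->get() and nouveau_event_trigger() calls:

    #include <linux/workqueue.h>

    struct hpd_ctx {
            struct work_struct work;
            int index;
            bool (*sample_gpio)(struct hpd_ctx *);       /* hypothetical */
            void (*report)(struct hpd_ctx *, bool plug); /* hypothetical */
    };

    static void
    hpd_work(struct work_struct *w)
    {
            struct hpd_ctx *ctx = container_of(w, struct hpd_ctx, work);
            /* sample the line now that sleeping is allowed */
            ctx->report(ctx, ctx->sample_gpio(ctx));
    }

    /* called from the (atomic) GPIO interrupt path */
    static void
    hpd_irq(struct hpd_ctx *ctx)
    {
            schedule_work(&ctx->work);
    }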
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
new file mode 100644
index 000000000000..035ebeacbb1c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
@@ -0,0 +1,59 @@
+#ifndef __NVKM_DISP_CONN_H__
+#define __NVKM_DISP_CONN_H__
+
+#include "priv.h"
+
+struct nvkm_connector {
+ struct nouveau_object base;
+ struct list_head head;
+
+ struct nvbios_connE info;
+ int index;
+
+ struct {
+ struct nouveau_eventh *event;
+ struct work_struct work;
+ } hpd;
+};
+
+#define nvkm_connector_create(p,e,c,b,i,d) \
+ nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
+#define nvkm_connector_destroy(d) ({ \
+ struct nvkm_connector *disp = (d); \
+ _nvkm_connector_dtor(nv_object(disp)); \
+})
+#define nvkm_connector_init(d) ({ \
+ struct nvkm_connector *disp = (d); \
+ _nvkm_connector_init(nv_object(disp)); \
+})
+#define nvkm_connector_fini(d,s) ({ \
+ struct nvkm_connector *disp = (d); \
+ _nvkm_connector_fini(nv_object(disp), (s)); \
+})
+
+int nvkm_connector_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct nvbios_connE *,
+ int, int, void **);
+
+int _nvkm_connector_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void _nvkm_connector_dtor(struct nouveau_object *);
+int _nvkm_connector_init(struct nouveau_object *);
+int _nvkm_connector_fini(struct nouveau_object *, bool);
+
+struct nvkm_connector_impl {
+ struct nouveau_oclass base;
+};
+
+#ifndef MSG
+#define MSG(l,f,a...) do { \
+ struct nvkm_connector *_conn = (void *)conn; \
+ nv_##l(nv_object(conn)->engine, "%02x:%02x%02x: "f, _conn->index, \
+ _conn->info.location, _conn->info.type, ##a); \
+} while(0)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
+
+#endif
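The MSG()/DBG()/ERR() macros that close the header prefix every message with the connector's index, location and type, and depend on a conn variable being in scope at the call site. A standalone sketch of the same context-prefix idea using kernel-style GNU variadic macros; fprintf stands in for the nv_debug/nv_error backends:

    #include <stdio.h>

    struct conn_id { unsigned index, location, type; };

    #define CONN_ERR(c, fmt, ...) \
            fprintf(stderr, "%02x:%02x%02x: " fmt, (c)->index, \
                    (c)->location, (c)->type, ##__VA_ARGS__)

    int main(void)
    {
            struct conn_id c = { 1, 0, 0x46 };
            CONN_ERR(&c, "hpd %02x unknown\n", 0x3f);
            return 0;
    }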
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 3ca2d25b7f5e..5a5b59b21130 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -30,42 +30,38 @@
#include <engine/disp.h>
-#include "dport.h"
+#include <core/class.h>
-#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \
- dp->outp->hasht, dp->outp->hashm, ##args)
-#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \
- dp->outp->hasht, dp->outp->hashm, ##args)
+#include "dport.h"
+#include "outpdp.h"
/******************************************************************************
* link training
*****************************************************************************/
struct dp_state {
- const struct nouveau_dp_func *func;
- struct nouveau_disp *disp;
- struct dcb_output *outp;
- struct nvbios_dpout info;
- u8 version;
- struct nouveau_i2c_port *aux;
- int head;
- u8 dpcd[4];
+ struct nvkm_output_dp *outp;
int link_nr;
u32 link_bw;
u8 stat[6];
u8 conf[4];
+ bool pc2;
+ u8 pc2stat;
+ u8 pc2conf[2];
};
static int
dp_set_link_config(struct dp_state *dp)
{
- struct nouveau_disp *disp = dp->disp;
+ struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
+ struct nvkm_output_dp *outp = dp->outp;
+ struct nouveau_disp *disp = nouveau_disp(outp);
struct nouveau_bios *bios = nouveau_bios(disp);
struct nvbios_init init = {
- .subdev = nv_subdev(dp->disp),
+ .subdev = nv_subdev(disp),
.bios = bios,
.offset = 0x0000,
- .outp = dp->outp,
- .crtc = dp->head,
+ .outp = &outp->base.info,
+ .crtc = -1,
.execute = 1,
};
u32 lnkcmp;
@@ -75,8 +71,8 @@ dp_set_link_config(struct dp_state *dp)
DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
/* set desired link configuration on the source */
- if ((lnkcmp = dp->info.lnkcmp)) {
- if (dp->version < 0x30) {
+ if ((lnkcmp = dp->outp->info.lnkcmp)) {
+ if (outp->version < 0x30) {
while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
lnkcmp += 4;
init.offset = nv_ro16(bios, lnkcmp + 2);
@@ -89,73 +85,112 @@ dp_set_link_config(struct dp_state *dp)
nvbios_exec(&init);
}
- ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
- dp->link_nr, dp->link_bw / 27000,
- dp->dpcd[DPCD_RC02] &
- DPCD_RC02_ENHANCED_FRAME_CAP);
+ ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
+ outp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
if (ret) {
- ERR("lnk_ctl failed with %d\n", ret);
+ if (ret < 0)
+ ERR("lnk_ctl failed with %d\n", ret);
return ret;
}
+ impl->lnk_pwr(outp, dp->link_nr);
+
/* set desired link configuration on the sink */
sink[0] = dp->link_bw / 27000;
sink[1] = dp->link_nr;
- if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+ if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
- return nv_wraux(dp->aux, DPCD_LC00, sink, 2);
+ return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
}
static void
dp_set_training_pattern(struct dp_state *dp, u8 pattern)
{
+ struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
+ struct nvkm_output_dp *outp = dp->outp;
u8 sink_tp;
DBG("training pattern %d\n", pattern);
- dp->func->pattern(dp->disp, dp->outp, dp->head, pattern);
+ impl->pattern(outp, pattern);
- nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
sink_tp |= pattern;
- nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+ nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
}
static int
-dp_link_train_commit(struct dp_state *dp)
+dp_link_train_commit(struct dp_state *dp, bool pc)
{
- int i;
+ struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
+ struct nvkm_output_dp *outp = dp->outp;
+ int ret, i;
for (i = 0; i < dp->link_nr; i++) {
u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ u8 lpc2 = (dp->pc2stat >> (i * 2)) & 0x3;
u8 lpre = (lane & 0x0c) >> 2;
u8 lvsw = (lane & 0x03) >> 0;
+ u8 hivs = 3 - lpre;
+ u8 hipe = 3;
+ u8 hipc = 3;
+
+ if (lpc2 >= hipc)
+ lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
+ if (lpre >= hipe) {
+ lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
+ lvsw = hivs = 3 - (lpre & 3);
+ } else
+ if (lvsw >= hivs) {
+ lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
+ }
dp->conf[i] = (lpre << 3) | lvsw;
- if (lvsw == 3)
- dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED;
- if (lpre == 3)
- dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED;
+ dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
+
+ DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
+ impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
+ }
+
+ ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
+ if (ret)
+ return ret;
- DBG("config lane %d %02x\n", i, dp->conf[i]);
- dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre);
+ if (pc) {
+ ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
+ if (ret)
+ return ret;
}
- return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4);
+ return 0;
}
static int
-dp_link_train_update(struct dp_state *dp, u32 delay)
+dp_link_train_update(struct dp_state *dp, bool pc, u32 delay)
{
+ struct nvkm_output_dp *outp = dp->outp;
int ret;
- udelay(delay);
+ if (outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
+ mdelay(outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
+ else
+ udelay(delay);
- ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6);
+ ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
if (ret)
return ret;
- DBG("status %6ph\n", dp->stat);
+ if (pc) {
+ ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
+ if (ret)
+ dp->pc2stat = 0x00;
+ DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
+ } else {
+ DBG("status %6ph\n", dp->stat);
+ }
+
return 0;
}
@@ -169,8 +204,8 @@ dp_link_train_cr(struct dp_state *dp)
dp_set_training_pattern(dp, 1);
do {
- if (dp_link_train_commit(dp) ||
- dp_link_train_update(dp, 100))
+ if (dp_link_train_commit(dp, false) ||
+ dp_link_train_update(dp, false, 100))
break;
cr_done = true;
@@ -196,13 +231,19 @@ dp_link_train_cr(struct dp_state *dp)
static int
dp_link_train_eq(struct dp_state *dp)
{
+ struct nvkm_output_dp *outp = dp->outp;
bool eq_done = false, cr_done = true;
int tries = 0, i;
- dp_set_training_pattern(dp, 2);
+ if (outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED)
+ dp_set_training_pattern(dp, 3);
+ else
+ dp_set_training_pattern(dp, 2);
do {
- if (dp_link_train_update(dp, 400))
+ if ((tries &&
+ dp_link_train_commit(dp, dp->pc2)) ||
+ dp_link_train_update(dp, dp->pc2, 400))
break;
eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -214,9 +255,6 @@ dp_link_train_eq(struct dp_state *dp)
!(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
eq_done = false;
}
-
- if (dp_link_train_commit(dp))
- break;
} while (!eq_done && cr_done && ++tries <= 5);
return eq_done ? 0 : -1;
@@ -225,121 +263,109 @@ dp_link_train_eq(struct dp_state *dp)
static void
dp_link_train_init(struct dp_state *dp, bool spread)
{
+ struct nvkm_output_dp *outp = dp->outp;
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(disp);
struct nvbios_init init = {
- .subdev = nv_subdev(dp->disp),
- .bios = nouveau_bios(dp->disp),
- .outp = dp->outp,
- .crtc = dp->head,
+ .subdev = nv_subdev(disp),
+ .bios = bios,
+ .outp = &outp->base.info,
+ .crtc = -1,
.execute = 1,
};
/* set desired spread */
if (spread)
- init.offset = dp->info.script[2];
+ init.offset = outp->info.script[2];
else
- init.offset = dp->info.script[3];
+ init.offset = outp->info.script[3];
nvbios_exec(&init);
/* pre-train script */
- init.offset = dp->info.script[0];
+ init.offset = outp->info.script[0];
nvbios_exec(&init);
}
static void
dp_link_train_fini(struct dp_state *dp)
{
+ struct nvkm_output_dp *outp = dp->outp;
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(disp);
struct nvbios_init init = {
- .subdev = nv_subdev(dp->disp),
- .bios = nouveau_bios(dp->disp),
- .outp = dp->outp,
- .crtc = dp->head,
+ .subdev = nv_subdev(disp),
+ .bios = bios,
+ .outp = &outp->base.info,
+ .crtc = -1,
.execute = 1,
};
/* post-train script */
- init.offset = dp->info.script[1],
+ init.offset = outp->info.script[1],
nvbios_exec(&init);
}
-int
-nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
- struct dcb_output *outp, int head, u32 datarate)
+static const struct dp_rates {
+ u32 rate;
+ u8 bw;
+ u8 nr;
+} nouveau_dp_rates[] = {
+ { 2160000, 0x14, 4 },
+ { 1080000, 0x0a, 4 },
+ { 1080000, 0x14, 2 },
+ { 648000, 0x06, 4 },
+ { 540000, 0x0a, 2 },
+ { 540000, 0x14, 1 },
+ { 324000, 0x06, 2 },
+ { 270000, 0x0a, 1 },
+ { 162000, 0x06, 1 },
+ {}
+};
+
+void
+nouveau_dp_train(struct work_struct *w)
{
- struct nouveau_bios *bios = nouveau_bios(disp);
- struct nouveau_i2c *i2c = nouveau_i2c(disp);
+ struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ const struct dp_rates *cfg = nouveau_dp_rates;
struct dp_state _dp = {
- .disp = disp,
- .func = func,
.outp = outp,
- .head = head,
}, *dp = &_dp;
- const u32 bw_list[] = { 540000, 270000, 162000, 0 };
- const u32 *link_bw = bw_list;
- u8 hdr, cnt, len;
- u32 data;
+ u32 datarate = 0;
int ret;
- /* find the bios displayport data relevant to this output */
- data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
- &hdr, &cnt, &len, &dp->info);
- if (!data) {
- ERR("bios data not found\n");
- return -EINVAL;
- }
-
- /* acquire the aux channel and fetch some info about the display */
- if (outp->location)
- dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
- else
- dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
- if (!dp->aux) {
- ERR("no aux channel?!\n");
- return -ENODEV;
- }
-
- ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
- if (ret) {
- /* it's possible the display has been unplugged before we
- * get here. we still need to execute the full set of
- * vbios scripts, and program the OR at a high enough
- * frequency to satisfy the target mode. failure to do
- * so results at best in an UPDATE hanging, and at worst
- * with PDISP running away to join the circus.
- */
- dp->dpcd[1] = link_bw[0] / 27000;
- dp->dpcd[2] = 4;
- dp->dpcd[3] = 0x00;
- ERR("failed to read DPCD\n");
- }
-
/* bring capabilities within encoder limits */
- if ((dp->dpcd[2] & 0x1f) > dp->outp->dpconf.link_nr) {
- dp->dpcd[2] &= ~0x1f;
- dp->dpcd[2] |= dp->outp->dpconf.link_nr;
+ if (nv_mclass(disp) < NVD0_DISP_CLASS)
+ outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
+ if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
+ outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
+ outp->dpcd[2] |= outp->base.info.dpconf.link_nr;
+ }
+ if (outp->dpcd[1] > outp->base.info.dpconf.link_bw)
+ outp->dpcd[1] = outp->base.info.dpconf.link_bw;
+ dp->pc2 = outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED;
+
+ /* restrict link config to the lowest required rate, if requested */
+ if (datarate) {
+ datarate = (datarate / 8) * 10; /* 8B/10B coding overhead */
+ while (cfg[1].rate >= datarate)
+ cfg++;
}
- if (dp->dpcd[1] > dp->outp->dpconf.link_bw)
- dp->dpcd[1] = dp->outp->dpconf.link_bw;
+ cfg--;
- /* adjust required bandwidth for 8B/10B coding overhead */
- datarate = (datarate / 8) * 10;
+ /* disable link interrupt handling during link training */
+ nouveau_event_put(outp->irq);
/* enable down-spreading and execute pre-train script from vbios */
- dp_link_train_init(dp, dp->dpcd[3] & 0x01);
+ dp_link_train_init(dp, outp->dpcd[3] & 0x01);
- /* start off at highest link rate supported by encoder and display */
- while (*link_bw > (dp->dpcd[1] * 27000))
- link_bw++;
-
- while ((ret = -EIO) && link_bw[0]) {
- /* find minimum required lane count at this link rate */
- dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
- while ((dp->link_nr >> 1) * link_bw[0] > datarate)
- dp->link_nr >>= 1;
-
- /* drop link rate to minimum with this lane count */
- while ((link_bw[1] * dp->link_nr) > datarate)
- link_bw++;
- dp->link_bw = link_bw[0];
+ while (ret = -EIO, (++cfg)->rate) {
+ /* select next configuration supported by encoder and sink */
+ while (cfg->nr > (outp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT) ||
+ cfg->bw > (outp->dpcd[DPCD_RC01_MAX_LINK_RATE]))
+ cfg++;
+ dp->link_bw = cfg->bw * 27000;
+ dp->link_nr = cfg->nr;
/* program selected link configuration */
ret = dp_set_link_config(dp);
@@ -356,17 +382,18 @@ nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
*/
break;
}
-
- /* retry at lower rate */
- link_bw++;
}
- /* finish link training */
+ /* finish link training and execute post-train script from vbios */
dp_set_training_pattern(dp, 0);
if (ret < 0)
ERR("link training failed\n");
- /* execute post-train script from vbios */
dp_link_train_fini(dp);
- return (ret < 0) ? false : true;
+
+ /* signal completion and enable link interrupt handling */
+ DBG("training complete\n");
+ atomic_set(&outp->lt.done, 1);
+ wake_up(&outp->lt.wait);
+ nouveau_event_get(outp->irq);
}
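The rewritten trainer replaces the old bandwidth-list walk with nouveau_dp_rates[]: it first advances to the slowest entry that still satisfies the 8b/10b-scaled datarate, then keeps advancing past configurations the sink cannot support, which also provides the retry-at-lower-rate fallback on training failure. A compilable sketch of that selection logic with the same table; rates are total link bandwidth in KB/s, and bw is the per-lane DPCD code (0x06/0x0a/0x14), so bw * 27000 gives the per-lane rate printed by the DBG above:

    #include <stdio.h>

    static const struct dp_rate {
            unsigned rate;          /* total link bandwidth, KB/s */
            unsigned char bw, nr;   /* per-lane rate code, lane count */
    } rates[] = {
            { 2160000, 0x14, 4 }, { 1080000, 0x0a, 4 }, { 1080000, 0x14, 2 },
            {  648000, 0x06, 4 }, {  540000, 0x0a, 2 }, {  540000, 0x14, 1 },
            {  324000, 0x06, 2 }, {  270000, 0x0a, 1 }, {  162000, 0x06, 1 },
            {}
    };

    /* pick the slowest config satisfying 'datarate' (already scaled by
     * 10/8 for line coding) within the sink's lane/rate capabilities */
    static const struct dp_rate *
    pick(unsigned datarate, unsigned char max_nr, unsigned char max_bw)
    {
            const struct dp_rate *cfg = rates;
            while (cfg[1].rate && cfg[1].rate >= datarate)
                    cfg++;
            while (cfg->rate && (cfg->nr > max_nr || cfg->bw > max_bw))
                    cfg++;  /* fall back toward slower configs */
            return cfg->rate ? cfg : NULL;
    }

    int main(void)
    {
            const struct dp_rate *c = pick((270000 / 8) * 10, 4, 0x0a);
            if (c)
                    printf("%u lanes at %u KB/s\n",
                           (unsigned)c->nr, c->bw * 27000u);
            return 0;
    }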
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
index 0e1bbd18ff6c..5628d2d5ec71 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -2,19 +2,18 @@
#define __NVKM_DISP_DPORT_H__
/* DPCD Receiver Capabilities */
-#define DPCD_RC00 0x00000
-#define DPCD_RC00_DPCD_REV 0xff
-#define DPCD_RC01 0x00001
-#define DPCD_RC01_MAX_LINK_RATE 0xff
+#define DPCD_RC00_DPCD_REV 0x00000
+#define DPCD_RC01_MAX_LINK_RATE 0x00001
#define DPCD_RC02 0x00002
#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80
+#define DPCD_RC02_TPS3_SUPPORTED 0x40
#define DPCD_RC02_MAX_LANE_COUNT 0x1f
#define DPCD_RC03 0x00003
#define DPCD_RC03_MAX_DOWNSPREAD 0x01
+#define DPCD_RC0E_AUX_RD_INTERVAL 0x0000e
/* DPCD Link Configuration */
-#define DPCD_LC00 0x00100
-#define DPCD_LC00_LINK_BW_SET 0xff
+#define DPCD_LC00_LINK_BW_SET 0x00100
#define DPCD_LC01 0x00101
#define DPCD_LC01_ENHANCED_FRAME_EN 0x80
#define DPCD_LC01_LANE_COUNT_SET 0x1f
@@ -25,6 +24,16 @@
#define DPCD_LC03_PRE_EMPHASIS_SET 0x18
#define DPCD_LC03_MAX_SWING_REACHED 0x04
#define DPCD_LC03_VOLTAGE_SWING_SET 0x03
+#define DPCD_LC0F 0x0010f
+#define DPCD_LC0F_LANE1_MAX_POST_CURSOR2_REACHED 0x40
+#define DPCD_LC0F_LANE1_POST_CURSOR2_SET 0x30
+#define DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED 0x04
+#define DPCD_LC0F_LANE0_POST_CURSOR2_SET 0x03
+#define DPCD_LC10 0x00110
+#define DPCD_LC10_LANE3_MAX_POST_CURSOR2_REACHED 0x40
+#define DPCD_LC10_LANE3_POST_CURSOR2_SET 0x30
+#define DPCD_LC10_LANE2_MAX_POST_CURSOR2_REACHED 0x04
+#define DPCD_LC10_LANE2_POST_CURSOR2_SET 0x03
/* DPCD Link/Sink Status */
#define DPCD_LS02 0x00202
@@ -55,24 +64,12 @@
#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30
#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c
#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03
+#define DPCD_LS0C 0x0020c
+#define DPCD_LS0C_LANE3_POST_CURSOR2 0xc0
+#define DPCD_LS0C_LANE2_POST_CURSOR2 0x30
+#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
+#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
-struct nouveau_disp;
-struct dcb_output;
-
-struct nouveau_dp_func {
- int (*pattern)(struct nouveau_disp *, struct dcb_output *,
- int head, int pattern);
- int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
- int link_nr, int link_bw, bool enh_frame);
- int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
- int lane, int swing, int preem);
-};
-
-extern const struct nouveau_dp_func nv94_sor_dp_func;
-extern const struct nouveau_dp_func nvd0_sor_dp_func;
-extern const struct nouveau_dp_func nv50_pior_dp_func;
-
-int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
- struct dcb_output *, int, u32);
+void nouveau_dp_train(struct work_struct *);
#endif
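Per the DPCD_LS02/DPCD_LS03 masks above, link status packs two lanes per byte with four status bits per lane (CR_DONE, CHANNEL_EQ_DONE, SYMBOL_LOCKED), which is why the training loops index their status buffers with (i >> 1) and shift by (i & 1) * 4. A small sketch decoding that layout; the example status bytes are made up:

    #include <stdio.h>

    /* two lanes per byte, lane 0 in the low nibble */
    static unsigned
    lane_stat(const unsigned char *ls02, int lane)
    {
            return (ls02[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
    }

    int main(void)
    {
            const unsigned char stat[6] = { 0x77, 0x71, 0x01, 0, 0, 0 };
            for (int i = 0; i < 4; i++)
                    printf("lane %d: %#x\n", i, lane_stat(stat, i));
            return 0;
    }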
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index cf6f59677b74..9fc7447fec90 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -81,7 +81,6 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
@@ -94,6 +93,7 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 6c89af792889..a32666ed0c47 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -51,6 +51,14 @@ nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
args->hblanke = args->htotal - 1;
+ /*
+ * If output is vga instead of digital then vtotal/htotal is invalid
+ * so we have to give up and trigger the timestamping fallback in the
+ * drm core.
+ */
+ if (!args->vtotal || !args->htotal)
+ return -ENOTSUPP;
+
args->time[0] = ktime_to_ns(ktime_get());
line = nv_rd32(priv, 0x600868 + (head * 0x2000));
args->time[1] = ktime_to_ns(ktime_get());
@@ -78,13 +86,13 @@ nv04_disp_sclass[] = {
******************************************************************************/
static void
-nv04_disp_vblank_enable(struct nouveau_event *event, int head)
+nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head)
{
nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
}
static void
-nv04_disp_vblank_disable(struct nouveau_event *event, int head)
+nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head)
{
nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
}
@@ -98,12 +106,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 pvideo;
if (crtc0 & 0x00000001) {
- nouveau_event_trigger(priv->base.vblank, 0);
+ nouveau_event_trigger(priv->base.vblank, 1, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nouveau_event_trigger(priv->base.vblank, 1);
+ nouveau_event_trigger(priv->base.vblank, 1, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
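Beyond the scanoutpos fallback, this file shows the signature change that recurs through the series: vblank enable/disable callbacks and nouveau_event_trigger() now carry a type argument (always 1 for vblank here), so a single event source can serve several event types, as the HPD plug/unplug types do. A sketch of what such typed dispatch might look like; the structures are illustrative, not the actual nouveau event API:

    struct evh {
            unsigned types;         /* mask of types this handler wants */
            int (*func)(void *priv, unsigned type, int index);
            void *priv;
    };

    static void
    event_trigger(struct evh *h, int n, unsigned type, int index)
    {
            for (int i = 0; i < n; i++) {
                    if (h[i].types & type)
                            h[i].func(h[i].priv, type, index);
            }
    }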
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 9a0cab9c3adb..2283c442a10d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -829,13 +829,13 @@ nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
}
static void
-nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
{
nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
}
static void
-nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
{
nv_mask(event->priv, 0x61002c, (4 << head), 0);
}
@@ -1114,19 +1114,20 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
}
-static u16
-exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
- struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+static struct nvkm_output *
+exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+ u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_outp *info)
{
struct nouveau_bios *bios = nouveau_bios(priv);
- u16 mask, type, data;
+ struct nvkm_output *outp;
+ u16 mask, type;
- if (outp < 4) {
+ if (or < 4) {
type = DCB_OUTPUT_ANALOG;
mask = 0;
} else
- if (outp < 8) {
+ if (or < 8) {
switch (ctrl & 0x00000f00) {
case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -1136,45 +1137,48 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
default:
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
- return 0x0000;
+ return NULL;
}
- outp -= 4;
+ or -= 4;
} else {
- outp = outp - 8;
+ or = or - 8;
type = 0x0010;
mask = 0;
switch (ctrl & 0x00000f00) {
- case 0x00000000: type |= priv->pior.type[outp]; break;
+ case 0x00000000: type |= priv->pior.type[or]; break;
default:
nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
- return 0x0000;
+ return NULL;
}
}
mask = 0x00c0 & (mask << 6);
- mask |= 0x0001 << outp;
+ mask |= 0x0001 << or;
mask |= 0x0100 << head;
- data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
- if (!data)
- return 0x0000;
-
- /* off-chip encoders require matching the exact encoder type */
- if (dcb->location != 0)
- type |= dcb->extdev << 8;
+ list_for_each_entry(outp, &priv->base.outp, head) {
+ if ((outp->info.hasht & 0xff) == type &&
+ (outp->info.hashm & mask) == mask) {
+ *data = nvbios_outp_match(bios, outp->info.hasht,
+ outp->info.hashm,
+ ver, hdr, cnt, len, info);
+ if (!*data)
+ return NULL;
+ return outp;
+ }
+ }
- return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+ return NULL;
}
-static bool
+static struct nvkm_output *
exec_script(struct nv50_disp_priv *priv, int head, int id)
{
struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
struct nvbios_outp info;
- struct dcb_output dcb;
u8 ver, hdr, cnt, len;
- u16 data;
- u32 ctrl = 0x00000000;
+ u32 data, ctrl = 0;
u32 reg;
int i;
@@ -1204,36 +1208,35 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
}
if (!(ctrl & (1 << head)))
- return false;
+ return NULL;
i--;
- data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
- if (data) {
+ outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
.offset = info.script[id],
- .outp = &dcb,
+ .outp = &outp->info,
.crtc = head,
.execute = 1,
};
- return nvbios_exec(&init) == 0;
+ nvbios_exec(&init);
}
- return false;
+ return outp;
}
-static u32
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
- struct dcb_output *outp)
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
{
struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
struct nvbios_outp info1;
struct nvbios_ocfg info2;
u8 ver, hdr, cnt, len;
- u32 ctrl = 0x00000000;
- u32 data, conf = ~0;
+ u32 data, ctrl = 0;
u32 reg;
int i;
@@ -1263,37 +1266,37 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
}
if (!(ctrl & (1 << head)))
- return conf;
+ return NULL;
i--;
- data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
- if (!data)
- return conf;
+ outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+ if (!outp)
+ return NULL;
- if (outp->location == 0) {
- switch (outp->type) {
+ if (outp->info.location == 0) {
+ switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- conf = (ctrl & 0x00000f00) >> 8;
+ *conf = (ctrl & 0x00000f00) >> 8;
if (pclk >= 165000)
- conf |= 0x0100;
+ *conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- conf = priv->sor.lvdsconf;
+ *conf = priv->sor.lvdsconf;
break;
case DCB_OUTPUT_DP:
- conf = (ctrl & 0x00000f00) >> 8;
+ *conf = (ctrl & 0x00000f00) >> 8;
break;
case DCB_OUTPUT_ANALOG:
default:
- conf = 0x00ff;
+ *conf = 0x00ff;
break;
}
} else {
- conf = (ctrl & 0x00000f00) >> 8;
+ *conf = (ctrl & 0x00000f00) >> 8;
pclk = pclk / 2;
}
- data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
@@ -1301,7 +1304,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
.subdev = nv_subdev(priv),
.bios = bios,
.offset = data,
- .outp = outp,
+ .outp = &outp->info,
.crtc = head,
.execute = 1,
};
@@ -1310,7 +1313,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
}
}
- return conf;
+ return outp;
}
static void
@@ -1322,7 +1325,35 @@ nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
static void
nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
{
- exec_script(priv, head, 2);
+ struct nvkm_output *outp = exec_script(priv, head, 2);
+
+ /* the binary driver does this outside of the supervisor handling
+ * (after the third supervisor from a detach). we (currently?)
+ * allow both detach/attach to happen in the same set of
+ * supervisor interrupts, so it would make sense to execute this
+ * (full power down?) script after all the detach phases of the
+ * supervisor handling. like with training if needed from the
+ * second supervisor, nvidia doesn't do this, so who knows if it's
+ * entirely safe, but it does appear to work..
+ *
+ * without this script being run, on some configurations i've
+ * seen, switching from DP to TMDS on a DP connector may result
+ * in a blank screen (SOR_PWR off/on can restore it)
+ */
+ if (outp && outp->info.type == DCB_OUTPUT_DP) {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = nouveau_bios(priv),
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
}
static void
@@ -1444,56 +1475,83 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
static void
nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
{
- struct dcb_output outp;
+ struct nvkm_output *outp;
u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
u32 hval, hreg = 0x614200 + (head * 0x800);
u32 oval, oreg;
- u32 mask;
- u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
- if (conf != ~0) {
- if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
- u32 soff = (ffs(outp.or) - 1) * 0x08;
- u32 ctrl = nv_rd32(priv, 0x610794 + soff);
- u32 datarate;
-
- switch ((ctrl & 0x000f0000) >> 16) {
- case 6: datarate = pclk * 30 / 8; break;
- case 5: datarate = pclk * 24 / 8; break;
- case 2:
- default:
- datarate = pclk * 18 / 8;
- break;
- }
+ u32 mask, conf;
- nouveau_dp_train(&priv->base, priv->sor.dp,
- &outp, head, datarate);
- }
+ outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+ if (!outp)
+ return;
+
+ /* we allow both encoder attach and detach operations to occur
+ * within a single supervisor (ie. modeset) sequence. the
+ * encoder detach scripts quite often switch off power to the
+ * lanes, which requires the link to be re-trained.
+ *
+ * this is not generally an issue as the sink "must" (heh)
+ * signal an irq when it's lost sync so the driver can
+ * re-train.
+ *
+ * however, on some boards, if one does not configure at least
+ * the gpu side of the link *before* attaching, then various
+ * things can go horribly wrong (PDISP disappearing from mmio,
+ * third supervisor never happens, etc).
+ *
+ * the solution is simply to retrain here, if necessary. last
+ * i checked, the binary driver userspace does not appear to
+ * trigger this situation (it forces an UPDATE between steps).
+ */
+ if (outp->info.type == DCB_OUTPUT_DP) {
+ u32 soff = (ffs(outp->info.or) - 1) * 0x08;
+ u32 ctrl, datarate;
- exec_clkcmp(priv, head, 0, pclk, &outp);
-
- if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) {
- oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
- oval = 0x00000000;
- hval = 0x00000000;
- mask = 0xffffffff;
- } else
- if (!outp.location) {
- if (outp.type == DCB_OUTPUT_DP)
- nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
- oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
- oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
- hval = 0x00000000;
- mask = 0x00000707;
+ if (outp->info.location == 0) {
+ ctrl = nv_rd32(priv, 0x610794 + soff);
+ soff = 1;
} else {
- oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
- oval = 0x00000001;
- hval = 0x00000001;
- mask = 0x00000707;
+ ctrl = nv_rd32(priv, 0x610b80 + soff);
+ soff = 2;
+ }
+
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: datarate = pclk * 30; break;
+ case 5: datarate = pclk * 24; break;
+ case 2:
+ default:
+ datarate = pclk * 18;
+ break;
}
- nv_mask(priv, hreg, 0x0000000f, hval);
- nv_mask(priv, oreg, mask, oval);
+ if (nvkm_output_dp_train(outp, datarate / soff, true))
+ ERR("link not trained before attach\n");
}
+
+ exec_clkcmp(priv, head, 0, pclk, &conf);
+
+ if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
+ oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
+ oval = 0x00000000;
+ hval = 0x00000000;
+ mask = 0xffffffff;
+ } else
+ if (!outp->info.location) {
+ if (outp->info.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_2_dp(priv, &outp->info, pclk);
+ oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
+ oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ hval = 0x00000000;
+ mask = 0x00000707;
+ } else {
+ oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
+ oval = 0x00000001;
+ hval = 0x00000001;
+ mask = 0x00000707;
+ }
+
+ nv_mask(priv, hreg, 0x0000000f, hval);
+ nv_mask(priv, oreg, mask, oval);
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -1521,30 +1579,16 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
static void
nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
{
- struct dcb_output outp;
+ struct nvkm_output *outp;
u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
- if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) {
- if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
- nv50_disp_intr_unk40_0_tmds(priv, &outp);
- else
- if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
- u32 soff = (ffs(outp.or) - 1) * 0x08;
- u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
- u32 datarate;
-
- switch ((ctrl & 0x000f0000) >> 16) {
- case 6: datarate = pclk * 30 / 8; break;
- case 5: datarate = pclk * 24 / 8; break;
- case 2:
- default:
- datarate = pclk * 18 / 8;
- break;
- }
+ u32 conf;
- nouveau_dp_train(&priv->base, priv->pior.dp,
- &outp, head, datarate);
- }
- }
+ outp = exec_clkcmp(priv, head, 1, pclk, &conf);
+ if (!outp)
+ return;
+
+ if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
}
void
@@ -1610,13 +1654,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nouveau_event_trigger(priv->base.vblank, 0);
+ nouveau_event_trigger(priv->base.vblank, 1, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nouveau_event_trigger(priv->base.vblank, 1);
+ nouveau_event_trigger(priv->base.vblank, 1, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
@@ -1656,11 +1700,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->pior.power = nv50_pior_power;
- priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
struct nouveau_oclass *
+nv50_disp_outp_sclass[] = {
+ &nv50_pior_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
nv50_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x50),
.base.base.ofuncs = &(struct nouveau_ofuncs) {
@@ -1669,6 +1718,7 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv50_disp_mast_mthd_chan,
.mthd.base = &nv50_disp_sync_mthd_chan,
.mthd.ovly = &nv50_disp_ovly_mthd_chan,
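With this change exec_lookup() no longer re-parses the DCB on every supervisor interrupt; it matches each tracked output's precomputed hasht/hashm against a mask built from the OR index, link and head. A sketch of just that matching step, assuming (as the code above does) that hasht's low byte holds the output type:

    #include <stddef.h>

    struct out {
            unsigned short hasht, hashm;
    };

    static const struct out *
    lookup(const struct out *outs, int n, unsigned type,
           unsigned link_mask, int or, int head)
    {
            unsigned mask = (0x00c0 & (link_mask << 6)) |
                            (0x0001 << or) | (0x0100 << head);
            for (int i = 0; i < n; i++) {
                    if ((outs[i].hasht & 0xff) == type &&
                        (outs[i].hashm & mask) == mask)
                            return &outs[i];
            }
            return NULL;
    }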
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 48d59db47f0d..1a886472b6f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -11,6 +11,8 @@
#include "dport.h"
#include "priv.h"
+#include "outp.h"
+#include "outpdp.h"
struct nv50_disp_impl {
struct nouveau_disp_impl base;
@@ -43,13 +45,11 @@ struct nv50_disp_priv {
int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
u32 lvdsconf;
- const struct nouveau_dp_func *dp;
} sor;
struct {
int nr;
int (*power)(struct nv50_disp_priv *, int ext, u32 data);
u8 type[3];
- const struct nouveau_dp_func *dp;
} pior;
};
@@ -199,4 +199,14 @@ void nvd0_disp_intr(struct nouveau_subdev *);
extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
+extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
+extern struct nouveau_oclass *nv50_disp_outp_sclass[];
+
+extern struct nvkm_output_dp_impl nv94_sor_dp_impl;
+int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+extern struct nouveau_oclass *nv94_disp_outp_sclass[];
+
+extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
+extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index 98c5b19bc2b0..1cc62e434683 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -264,7 +264,6 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
priv->pior.power = nv50_pior_power;
- priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
@@ -277,6 +276,7 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv84_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index 6844061c7e04..4f718a9f5aef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -77,6 +77,7 @@ nv94_disp_base_omthds[] = {
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
{ PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
@@ -122,13 +123,18 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->dac.sense = nv50_dac_sense;
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
- priv->sor.dp = &nv94_sor_dp_func;
priv->pior.power = nv50_pior_power;
- priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
struct nouveau_oclass *
+nv94_disp_outp_sclass[] = {
+ &nv50_pior_dp_impl.base.base,
+ &nv94_sor_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
nv94_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x88),
.base.base.ofuncs = &(struct nouveau_ofuncs) {
@@ -137,6 +143,7 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nv94_disp_outp_sclass,
.mthd.core = &nv94_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 88c96241c02a..6237a9a36f70 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -126,7 +126,6 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hdmi = nv84_hdmi_ctrl;
priv->pior.power = nv50_pior_power;
- priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
@@ -139,6 +138,7 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv84_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nva0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 46cb2ce0e82a..019124d4782b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -50,6 +50,7 @@ nva3_disp_base_omthds[] = {
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
{ PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
@@ -96,9 +97,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nva3_hda_eld;
priv->sor.hdmi = nva3_hdmi_ctrl;
- priv->sor.dp = &nv94_sor_dp_func;
priv->pior.power = nv50_pior_power;
- priv->pior.dp = &nv50_pior_dp_func;
return 0;
}
@@ -111,6 +110,7 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nv94_disp_outp_sclass,
.mthd.core = &nv94_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 876de9ac3793..fa30d8196f35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -748,13 +748,13 @@ nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
}
static void
-nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
+nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
{
nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
}
static void
-nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head)
+nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
{
nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
}
@@ -887,6 +887,7 @@ nvd0_disp_base_omthds[] = {
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
{ DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
{ PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
@@ -915,19 +916,20 @@ nvd0_disp_sclass[] = {
* Display engine implementation
******************************************************************************/
-static u16
-exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
- struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+static struct nvkm_output *
+exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+ u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_outp *info)
{
struct nouveau_bios *bios = nouveau_bios(priv);
- u16 mask, type, data;
+ struct nvkm_output *outp;
+ u16 mask, type;
- if (outp < 4) {
+ if (or < 4) {
type = DCB_OUTPUT_ANALOG;
mask = 0;
} else {
- outp -= 4;
+ or -= 4;
switch (ctrl & 0x00000f00) {
case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -939,101 +941,106 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
return 0x0000;
}
- dcb->sorconf.link = mask;
}
mask = 0x00c0 & (mask << 6);
- mask |= 0x0001 << outp;
+ mask |= 0x0001 << or;
mask |= 0x0100 << head;
- data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
- if (!data)
- return 0x0000;
+ list_for_each_entry(outp, &priv->base.outp, head) {
+ if ((outp->info.hasht & 0xff) == type &&
+ (outp->info.hashm & mask) == mask) {
+ *data = nvbios_outp_match(bios, outp->info.hasht,
+ outp->info.hashm,
+ ver, hdr, cnt, len, info);
+ if (!*data)
+ return NULL;
+ return outp;
+ }
+ }
- return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+ return NULL;
}
-static bool
+static struct nvkm_output *
exec_script(struct nv50_disp_priv *priv, int head, int id)
{
struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
struct nvbios_outp info;
- struct dcb_output dcb;
u8 ver, hdr, cnt, len;
- u32 ctrl = 0x00000000;
- u16 data;
- int outp;
+ u32 data, ctrl = 0;
+ int or;
- for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
- ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
+ for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+ ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
if (ctrl & (1 << head))
break;
}
- if (outp == 8)
- return false;
+ if (or == 8)
+ return NULL;
- data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
- if (data) {
+ outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
struct nvbios_init init = {
.subdev = nv_subdev(priv),
.bios = bios,
.offset = info.script[id],
- .outp = &dcb,
+ .outp = &outp->info,
.crtc = head,
.execute = 1,
};
- return nvbios_exec(&init) == 0;
+ nvbios_exec(&init);
}
- return false;
+ return outp;
}
-static u32
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
- u32 pclk, struct dcb_output *dcb)
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
{
struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvkm_output *outp;
struct nvbios_outp info1;
struct nvbios_ocfg info2;
u8 ver, hdr, cnt, len;
- u32 ctrl = 0x00000000;
- u32 data, conf = ~0;
- int outp;
+ u32 data, ctrl = 0;
+ int or;
- for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
- ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
+ for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+ ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
if (ctrl & (1 << head))
break;
}
- if (outp == 8)
- return conf;
+ if (or == 8)
+ return NULL;
- data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
- if (data == 0x0000)
- return conf;
+ outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+ if (!outp)
+ return NULL;
- switch (dcb->type) {
+ switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- conf = (ctrl & 0x00000f00) >> 8;
+ *conf = (ctrl & 0x00000f00) >> 8;
if (pclk >= 165000)
- conf |= 0x0100;
+ *conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- conf = priv->sor.lvdsconf;
+ *conf = priv->sor.lvdsconf;
break;
case DCB_OUTPUT_DP:
- conf = (ctrl & 0x00000f00) >> 8;
+ *conf = (ctrl & 0x00000f00) >> 8;
break;
case DCB_OUTPUT_ANALOG:
default:
- conf = 0x00ff;
+ *conf = 0x00ff;
break;
}
- data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
@@ -1041,7 +1048,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
.subdev = nv_subdev(priv),
.bios = bios,
.offset = data,
- .outp = dcb,
+ .outp = &outp->info,
.crtc = head,
.execute = 1,
};
@@ -1050,7 +1057,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
}
}
- return conf;
+ return outp;
}
static void
@@ -1062,7 +1069,23 @@ nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
static void
nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
{
- exec_script(priv, head, 2);
+ struct nvkm_output *outp = exec_script(priv, head, 2);
+
+ /* see note in nv50_disp_intr_unk20_0() */
+ if (outp && outp->info.type == DCB_OUTPUT_DP) {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = nouveau_bios(priv),
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
}
static void
@@ -1124,49 +1147,52 @@ nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
static void
nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
{
- struct dcb_output outp;
+ struct nvkm_output *outp;
u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
- if (conf != ~0) {
- u32 addr, data;
-
- if (outp.type == DCB_OUTPUT_DP) {
- u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
- switch ((sync & 0x000003c0) >> 6) {
- case 6: pclk = pclk * 30 / 8; break;
- case 5: pclk = pclk * 24 / 8; break;
- case 2:
- default:
- pclk = pclk * 18 / 8;
- break;
- }
-
- nouveau_dp_train(&priv->base, priv->sor.dp,
- &outp, head, pclk);
+ u32 conf, addr, data;
+
+ outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+ if (!outp)
+ return;
+
+ /* see note in nv50_disp_intr_unk20_2() */
+ if (outp->info.type == DCB_OUTPUT_DP) {
+ u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
+ switch ((sync & 0x000003c0) >> 6) {
+ case 6: pclk = pclk * 30; break;
+ case 5: pclk = pclk * 24; break;
+ case 2:
+ default:
+ pclk = pclk * 18;
+ break;
}
- exec_clkcmp(priv, head, 0, pclk, &outp);
+ if (nvkm_output_dp_train(outp, pclk, true))
+ ERR("link not trained before attach\n");
+ }
- if (outp.type == DCB_OUTPUT_ANALOG) {
- addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
- data = 0x00000000;
- } else {
- if (outp.type == DCB_OUTPUT_DP)
- nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
- addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
- data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
- }
+ exec_clkcmp(priv, head, 0, pclk, &conf);
- nv_mask(priv, addr, 0x00000707, data);
+ if (outp->info.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+ data = 0x00000000;
+ } else {
+ if (outp->info.type == DCB_OUTPUT_DP)
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+ addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+ data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
}
+
+ nv_mask(priv, addr, 0x00000707, data);
}
static void
nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
{
- struct dcb_output outp;
u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- exec_clkcmp(priv, head, 1, pclk, &outp);
+ u32 conf;
+
+ exec_clkcmp(priv, head, 1, pclk, &conf);
}
void
@@ -1240,7 +1266,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
chid, (mthd & 0x0000ffc), data, mthd, unkn);
if (chid == 0) {
- switch (mthd) {
+ switch (mthd & 0xffc) {
case 0x0080:
nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
impl->mthd.core);
@@ -1250,7 +1276,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
}
} else
if (chid <= 4) {
- switch (mthd) {
+ switch (mthd & 0xffc) {
case 0x0080:
nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
impl->mthd.base);
@@ -1260,7 +1286,7 @@ nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
}
} else
if (chid <= 8) {
- switch (mthd) {
+ switch (mthd & 0xffc) {
case 0x0080:
nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
impl->mthd.ovly);
@@ -1317,7 +1343,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nouveau_event_trigger(priv->base.vblank, i);
+ nouveau_event_trigger(priv->base.vblank, 1, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -1352,11 +1378,16 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
struct nouveau_oclass *
+nvd0_disp_outp_sclass[] = {
+ &nvd0_sor_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
nvd0_disp_oclass = &(struct nv50_disp_impl) {
.base.base.handle = NV_ENGINE(DISP, 0x90),
.base.base.ofuncs = &(struct nouveau_ofuncs) {
@@ -1365,6 +1396,7 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nvd0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
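Note the datarate arithmetic changed here: the case arms now produce pclk times bits-per-pixel without the old /8, since the 10/8 line-coding scaling lives with the rate table in dport.c (the (datarate / 8) * 10 step). A sketch of the per-mode bandwidth calculation as the switch above implies it; the depth codes come from the sync register field:

    /* pixel clock in kHz times bits per pixel, before 8b/10b scaling */
    static unsigned
    dp_datarate(unsigned pclk_khz, unsigned depth_code)
    {
            unsigned bpp;
            switch (depth_code) {
            case 6:  bpp = 30; break;       /* 10 bits per component */
            case 5:  bpp = 24; break;       /*  8 bits per component */
            case 2:
            default: bpp = 18; break;       /*  6 bits per component */
            }
            return pclk_khz * bpp;
    }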
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 44e0b8f34c1a..11328e3f5df1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -246,7 +246,6 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
@@ -259,6 +258,7 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 482585d375fa..104388081d73 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -81,7 +81,6 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->sor.power = nv50_sor_power;
priv->sor.hda_eld = nvd0_hda_eld;
priv->sor.hdmi = nvd0_hdmi_ctrl;
- priv->sor.dp = &nvd0_sor_dp_func;
return 0;
}
@@ -94,6 +93,7 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
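
Aside on the nvd0/nve0/nvf0 hunks above: the per-chipset `priv->sor.dp` function table goes away, and each implementation instead publishes a NULL-terminated list of output classes (`nvd0_disp_outp_sclass`) through `impl->base.outp`, which common disp code can walk to instantiate the right object for each DCB entry. A minimal standalone sketch of that selection pattern, with simplified stand-in types (not the driver's real definitions):

#include <stdio.h>

struct oclass { unsigned handle; const char *name; };

/* stand-in for DCB_OUTPUT_DP; the real value lives in dcb.h */
#define OUT_DP 0x06

static const struct oclass dp_class  = { OUT_DP, "sor-dp" };
static const struct oclass *sclass[] = { &dp_class, NULL };

static const struct oclass *
outp_class(const struct oclass **list, unsigned type)
{
	for (; *list; list++) {
		if (((*list)->handle & 0xff) == type)
			return *list;
	}
	return NULL;	/* no specialised class: fall back to base output */
}

int main(void)
{
	const struct oclass *c = outp_class(sclass, OUT_DP);
	printf("%s\n", c ? c->name : "base");
	return 0;
}
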
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
new file mode 100644
index 000000000000..ad9ba7ccec7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+
+#include "outp.h"
+
+int
+_nvkm_output_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvkm_output *outp = (void *)object;
+ nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
+ return nouveau_object_fini(&outp->base, suspend);
+}
+
+int
+_nvkm_output_init(struct nouveau_object *object)
+{
+ struct nvkm_output *outp = (void *)object;
+ int ret = nouveau_object_init(&outp->base);
+ if (ret == 0)
+ nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
+ return 0;
+}
+
+void
+_nvkm_output_dtor(struct nouveau_object *object)
+{
+ struct nvkm_output *outp = (void *)object;
+ list_del(&outp->head);
+ nouveau_object_ref(NULL, (void *)&outp->conn);
+ nouveau_object_destroy(&outp->base);
+}
+
+int
+nvkm_output_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct dcb_output *dcbE, int index,
+ int length, void **pobject)
+{
+ struct nouveau_bios *bios = nouveau_bios(engine);
+ struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nouveau_disp *disp = (void *)engine;
+ struct nvbios_connE connE;
+ struct nvkm_output *outp;
+ u8 ver, hdr;
+ u32 data;
+ int ret;
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ outp = *pobject;
+ if (ret)
+ return ret;
+
+ outp->info = *dcbE;
+ outp->index = index;
+
+ DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
+ dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
+ dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
+ dcbE->bus, dcbE->heads);
+
+ outp->port = i2c->find(i2c, outp->info.i2c_index);
+ outp->edid = outp->port;
+
+ data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
+ if (!data) {
+ DBG("vbios connector data not found\n");
+ memset(&connE, 0x00, sizeof(connE));
+ connE.type = DCB_CONNECTOR_NONE;
+ }
+
+ ret = nouveau_object_ctor(parent, engine, nvkm_connector_oclass,
+ &connE, outp->info.connector,
+ (struct nouveau_object **)&outp->conn);
+ if (ret < 0) {
+ ERR("error %d creating connector, disabling\n", ret);
+ return ret;
+ }
+
+ list_add_tail(&outp->head, &disp->outp);
+ return 0;
+}
+
+int
+_nvkm_output_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *dcbE, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nvkm_output *outp;
+ int ret;
+
+ ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
+ *pobject = nv_object(outp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass *
+nvkm_output_oclass = &(struct nvkm_output_impl) {
+ .base = {
+ .handle = 0,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_output_ctor,
+ .dtor = _nvkm_output_dtor,
+ .init = _nvkm_output_init,
+ .fini = _nvkm_output_fini,
+ },
+ },
+}.base;
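
The `}.base;` tail above is worth a note: the implementation struct is built as a file-scope compound literal (static storage duration per C99 6.5.2.5), and only the address of its embedded base class is exported, so generic code sees a plain `struct nouveau_oclass *` while the implementation can be recovered by pointer arithmetic. A self-contained sketch of the idiom, with simplified names:

#include <stdio.h>
#include <stddef.h>

struct base { int handle; };
struct impl { struct base base; int extra; };

/* export a pointer to the base member of a static compound literal */
struct base *obj = &(struct impl) {
	.base  = { .handle = 0x90 },
	.extra = 42,
}.base;

int main(void)
{
	/* the container recovers the impl, container_of-style */
	struct impl *i = (struct impl *)((char *)obj - offsetof(struct impl, base));
	printf("handle %#x extra %d\n", obj->handle, i->extra);
	return 0;
}
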
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
new file mode 100644
index 000000000000..bc76fbf85710
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
@@ -0,0 +1,59 @@
+#ifndef __NVKM_DISP_OUTP_H__
+#define __NVKM_DISP_OUTP_H__
+
+#include "priv.h"
+
+struct nvkm_output {
+ struct nouveau_object base;
+ struct list_head head;
+
+ struct dcb_output info;
+ int index;
+
+ struct nouveau_i2c_port *port;
+ struct nouveau_i2c_port *edid;
+
+ struct nvkm_connector *conn;
+};
+
+#define nvkm_output_create(p,e,c,b,i,d) \
+ nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
+#define nvkm_output_destroy(d) ({ \
+ struct nvkm_output *_outp = (d); \
+ _nvkm_output_dtor(nv_object(_outp)); \
+})
+#define nvkm_output_init(d) ({ \
+ struct nvkm_output *_outp = (d); \
+ _nvkm_output_init(nv_object(_outp)); \
+})
+#define nvkm_output_fini(d,s) ({ \
+ struct nvkm_output *_outp = (d); \
+ _nvkm_output_fini(nv_object(_outp), (s)); \
+})
+
+int nvkm_output_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct dcb_output *,
+ int, int, void **);
+
+int _nvkm_output_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void _nvkm_output_dtor(struct nouveau_object *);
+int _nvkm_output_init(struct nouveau_object *);
+int _nvkm_output_fini(struct nouveau_object *, bool);
+
+struct nvkm_output_impl {
+ struct nouveau_oclass base;
+};
+
+#ifndef MSG
+#define MSG(l,f,a...) do { \
+ struct nvkm_output *_outp = (void *)outp; \
+ nv_##l(nv_object(outp)->engine, "%02x:%04x:%04x: "f, _outp->index, \
+ _outp->info.hasht, _outp->info.hashm, ##a); \
+} while(0)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
+
+#endif
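
The `({ ... })` wrappers above are GNU C statement expressions: each evaluates its argument exactly once into a local, then forwards to the `_nvkm_output_*` helper while still yielding a value the way a function call would. A tiny standalone illustration (gcc/clang only; the names are simplified stand-ins):

#include <stdio.h>

struct obj { int refs; };

static int obj_init(struct obj *o) { o->refs = 1; return 0; }

#define widget_init(d) ({		\
	struct obj *_o = (d);		\
	obj_init(_o);			\
})

int main(void)
{
	struct obj w;
	int ret = widget_init(&w);	/* macro argument evaluated once */
	printf("ret=%d refs=%d\n", ret, w.refs);
	return 0;
}
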
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
new file mode 100644
index 000000000000..eb2d7789555d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+
+#include "outpdp.h"
+#include "conn.h"
+#include "dport.h"
+
+int
+nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
+{
+ struct nvkm_output_dp *outp = (void *)base;
+ bool retrain = true;
+ u8 link[2], stat[3];
+ u32 linkrate;
+ int ret, i;
+
+ /* check that the link is trained at a high enough rate */
+ ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2);
+ if (ret) {
+ DBG("failed to read link config, assuming no sink\n");
+ goto done;
+ }
+
+ linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+ linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
+ datarate = (datarate + 9) / 10; /* -> decakilobits */
+ if (linkrate < datarate) {
+ DBG("link not trained at sufficient rate\n");
+ goto done;
+ }
+
+ /* check that link is still trained */
+ ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3);
+ if (ret) {
+ DBG("failed to read link status, assuming no sink\n");
+ goto done;
+ }
+
+ if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
+ for (i = 0; i < (link[1] & DPCD_LC01_LANE_COUNT_SET); i++) {
+ u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
+ if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
+ !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+ !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
+ DBG("lane %d not equalised\n", lane);
+ goto done;
+ }
+ }
+ retrain = false;
+ } else {
+ DBG("no inter-lane alignment\n");
+ }
+
+done:
+ if (retrain || !atomic_read(&outp->lt.done)) {
+ /* no sink, but still need to configure source */
+ if (outp->dpcd[DPCD_RC00_DPCD_REV] == 0x00) {
+ outp->dpcd[DPCD_RC01_MAX_LINK_RATE] =
+ outp->base.info.dpconf.link_bw;
+ outp->dpcd[DPCD_RC02] =
+ outp->base.info.dpconf.link_nr;
+ }
+ atomic_set(&outp->lt.done, 0);
+ schedule_work(&outp->lt.work);
+ } else {
+ nouveau_event_get(outp->irq);
+ }
+
+ if (wait) {
+ if (!wait_event_timeout(outp->lt.wait,
+ atomic_read(&outp->lt.done),
+ msecs_to_jiffies(2000)))
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static void
+nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
+{
+ struct nouveau_i2c_port *port = outp->base.edid;
+ if (present) {
+ if (!outp->present) {
+ nouveau_i2c(port)->acquire_pad(port, 0);
+ DBG("aux power -> always\n");
+ outp->present = true;
+ }
+ nvkm_output_dp_train(&outp->base, 0, true);
+ } else {
+ if (outp->present) {
+ nouveau_i2c(port)->release_pad(port);
+ DBG("aux power -> demand\n");
+ outp->present = false;
+ }
+ atomic_set(&outp->lt.done, 0);
+ }
+}
+
+static void
+nvkm_output_dp_detect(struct nvkm_output_dp *outp)
+{
+ struct nouveau_i2c_port *port = outp->base.edid;
+ int ret = nouveau_i2c(port)->acquire_pad(port, 0);
+ if (ret == 0) {
+ ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
+ outp->dpcd, sizeof(outp->dpcd));
+ nvkm_output_dp_enable(outp, ret == 0);
+ nouveau_i2c(port)->release_pad(port);
+ }
+}
+
+static void
+nvkm_output_dp_service_work(struct work_struct *work)
+{
+ struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work);
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ int type = atomic_xchg(&outp->pending, 0);
+ u32 send = 0;
+
+ if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) {
+ nvkm_output_dp_detect(outp);
+ if (type & NVKM_I2C_UNPLUG)
+ send |= NVKM_HPD_UNPLUG;
+ if (type & NVKM_I2C_PLUG)
+ send |= NVKM_HPD_PLUG;
+ nouveau_event_get(outp->base.conn->hpd.event);
+ }
+
+ if (type & NVKM_I2C_IRQ) {
+ nvkm_output_dp_train(&outp->base, 0, true);
+ send |= NVKM_HPD_IRQ;
+ }
+
+ nouveau_event_trigger(disp->hpd, send, outp->base.info.connector);
+}
+
+static int
+nvkm_output_dp_service(void *data, u32 type, int index)
+{
+ struct nvkm_output_dp *outp = data;
+ DBG("HPD: %d\n", type);
+ atomic_or(type, &outp->pending);
+ schedule_work(&outp->work);
+ return NVKM_EVENT_DROP;
+}
+
+int
+_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvkm_output_dp *outp = (void *)object;
+ nouveau_event_put(outp->irq);
+ nvkm_output_dp_enable(outp, false);
+ return nvkm_output_fini(&outp->base, suspend);
+}
+
+int
+_nvkm_output_dp_init(struct nouveau_object *object)
+{
+ struct nvkm_output_dp *outp = (void *)object;
+ nvkm_output_dp_detect(outp);
+ return nvkm_output_init(&outp->base);
+}
+
+void
+_nvkm_output_dp_dtor(struct nouveau_object *object)
+{
+ struct nvkm_output_dp *outp = (void *)object;
+ nouveau_event_ref(NULL, &outp->irq);
+ nvkm_output_destroy(&outp->base);
+}
+
+int
+nvkm_output_dp_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ struct dcb_output *info, int index,
+ int length, void **pobject)
+{
+ struct nouveau_bios *bios = nouveau_bios(parent);
+ struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nvkm_output_dp *outp;
+ u8 hdr, cnt, len;
+ u32 data;
+ int ret;
+
+ ret = nvkm_output_create_(parent, engine, oclass, info, index,
+ length, pobject);
+ outp = *pobject;
+ if (ret)
+ return ret;
+
+ nouveau_event_ref(NULL, &outp->base.conn->hpd.event);
+
+ /* access to the aux channel is not optional... */
+ if (!outp->base.edid) {
+ ERR("aux channel not found\n");
+ return -ENODEV;
+ }
+
+ /* nor is the bios data for this output... */
+ data = nvbios_dpout_match(bios, outp->base.info.hasht,
+ outp->base.info.hashm, &outp->version,
+ &hdr, &cnt, &len, &outp->info);
+ if (!data) {
+ ERR("no bios dp data\n");
+ return -ENODEV;
+ }
+
+ DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
+
+ /* link training */
+ INIT_WORK(&outp->lt.work, nouveau_dp_train);
+ init_waitqueue_head(&outp->lt.wait);
+ atomic_set(&outp->lt.done, 0);
+
+ /* link maintenance */
+ ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index,
+ nvkm_output_dp_service, outp, &outp->irq);
+ if (ret) {
+ ERR("error monitoring aux irq event: %d\n", ret);
+ return ret;
+ }
+
+ INIT_WORK(&outp->work, nvkm_output_dp_service_work);
+
+ /* hotplug detect, replaces gpio-based mechanism with aux events */
+ ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
+ outp->base.edid->index,
+ nvkm_output_dp_service, outp,
+ &outp->base.conn->hpd.event);
+ if (ret) {
+ ERR("error monitoring aux hpd events: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+_nvkm_output_dp_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *info, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nvkm_output_dp *outp;
+ int ret;
+
+ ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
+ *pobject = nv_object(outp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
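
The bandwidth check at the top of nvkm_output_dp_train() above leans on DPCD units: LINK_BW_SET counts in 0.27 Gb/s steps, so `bw * 27000 * lanes` gives the raw rate in 10 kbit/s units, 8b/10b coding leaves 8/10 of that for payload, and the mode's kbit/s requirement is rounded up into the same units before comparing. The arithmetic, standalone and with example numbers only:

#include <stdio.h>

int main(void)
{
	unsigned bw = 0x0a, lanes = 4;		/* HBR x4: example values  */
	unsigned datarate = 5393000;		/* mode kbit/s: example    */
	unsigned linkrate = bw * 27000 * lanes;	/* 10 kbit/s units         */

	linkrate = linkrate * 8 / 10;		/* strip 8b/10b overhead   */
	datarate = (datarate + 9) / 10;		/* kbit/s -> 10 kbit/s, up */
	printf("%s\n", linkrate >= datarate ? "rate ok" : "needs retrain");
	return 0;
}
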
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
new file mode 100644
index 000000000000..ff33ba12cb67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
@@ -0,0 +1,65 @@
+#ifndef __NVKM_DISP_OUTP_DP_H__
+#define __NVKM_DISP_OUTP_DP_H__
+
+#include <subdev/bios.h>
+#include <subdev/bios/dp.h>
+
+#include "outp.h"
+
+struct nvkm_output_dp {
+ struct nvkm_output base;
+
+ struct nvbios_dpout info;
+ u8 version;
+
+ struct nouveau_eventh *irq;
+ struct nouveau_eventh *hpd;
+ struct work_struct work;
+ atomic_t pending;
+ bool present;
+ u8 dpcd[16];
+
+ struct {
+ struct work_struct work;
+ wait_queue_head_t wait;
+ atomic_t done;
+ } lt;
+};
+
+#define nvkm_output_dp_create(p,e,c,b,i,d) \
+ nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
+#define nvkm_output_dp_destroy(d) ({ \
+ struct nvkm_output_dp *_outp = (d); \
+ _nvkm_output_dp_dtor(nv_object(_outp)); \
+})
+#define nvkm_output_dp_init(d) ({ \
+ struct nvkm_output_dp *_outp = (d); \
+ _nvkm_output_dp_init(nv_object(_outp)); \
+})
+#define nvkm_output_dp_fini(d,s) ({ \
+ struct nvkm_output_dp *_outp = (d); \
+ _nvkm_output_dp_fini(nv_object(_outp), (s)); \
+})
+
+int nvkm_output_dp_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, struct dcb_output *,
+ int, int, void **);
+
+int _nvkm_output_dp_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void _nvkm_output_dp_dtor(struct nouveau_object *);
+int _nvkm_output_dp_init(struct nouveau_object *);
+int _nvkm_output_dp_fini(struct nouveau_object *, bool);
+
+struct nvkm_output_dp_impl {
+ struct nvkm_output_impl base;
+ int (*pattern)(struct nvkm_output_dp *, int);
+ int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
+ int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
+ int (*drv_ctl)(struct nvkm_output_dp *, int ln, int vs, int pe, int pc);
+};
+
+int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+
+#endif
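
The four hooks in nvkm_output_dp_impl above are the whole chipset contract for link training: the generic trainer (nouveau_dp_train, queued via lt.work in outpdp.c) drives them without knowing register layouts, and the nv50/nv94/nvd0 files below just fill in the table. A minimal dispatch sketch with stand-in types, not the real nouveau structures:

#include <stdio.h>

struct dp_out;

struct dp_impl {
	int (*pattern)(struct dp_out *, int pattern);
	int (*lnk_ctl)(struct dp_out *, int nr, int bw, int ef);
};

struct dp_out { const struct dp_impl *impl; };

static int demo_pattern(struct dp_out *o, int p)
{ printf("training pattern %d\n", p); return 0; }

static int demo_lnk_ctl(struct dp_out *o, int nr, int bw, int ef)
{ printf("%d lanes, bw %#x, enhanced framing %d\n", nr, bw, ef); return 0; }

static const struct dp_impl demo_impl = {
	.pattern = demo_pattern,
	.lnk_ctl = demo_lnk_ctl,
};

int main(void)
{
	struct dp_out out = { .impl = &demo_impl };
	out.impl->lnk_ctl(&out, 4, 0x0a, 1);	/* the generic trainer's view */
	out.impl->pattern(&out, 1);
	return 0;
}
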
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
index 2c8ce351b52d..fe0f256f11bf 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -33,68 +33,107 @@
#include "nv50.h"
/******************************************************************************
- * DisplayPort
+ * TMDS
*****************************************************************************/
-static struct nouveau_i2c_port *
-nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
+
+static int
+nv50_pior_tmds_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *info, u32 index,
+ struct nouveau_object **pobject)
{
- struct nouveau_i2c *i2c = nouveau_i2c(disp);
- return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+ struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nvkm_output *outp;
+ int ret;
+
+ ret = nvkm_output_create(parent, engine, oclass, info, index, &outp);
+ *pobject = nv_object(outp);
+ if (ret)
+ return ret;
+
+ outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev));
+ return 0;
}
+struct nvkm_output_impl
+nv50_pior_tmds_impl = {
+ .base.handle = DCB_OUTPUT_TMDS | 0x0100,
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_pior_tmds_ctor,
+ .dtor = _nvkm_output_dtor,
+ .init = _nvkm_output_init,
+ .fini = _nvkm_output_fini,
+ },
+};
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+
static int
-nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int pattern)
+nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nouveau_i2c_port *port;
- int ret = -EINVAL;
-
- port = nv50_pior_dp_find(disp, outp);
- if (port) {
- if (port->func->pattern)
- ret = port->func->pattern(port, pattern);
- else
- ret = 0;
- }
-
- return ret;
+ struct nouveau_i2c_port *port = outp->base.edid;
+ if (port && port->func->pattern)
+ return port->func->pattern(port, pattern);
+ return port ? 0 : -ENODEV;
}
static int
-nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int lane_nr, int link_bw, bool enh)
+nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
- struct nouveau_i2c_port *port;
- int ret = -EINVAL;
+ return 0;
+}
- port = nv50_pior_dp_find(disp, outp);
+static int
+nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+{
+ struct nouveau_i2c_port *port = outp->base.edid;
if (port && port->func->lnk_ctl)
- ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
+ return port->func->lnk_ctl(port, nr, bw, ef);
+ return port ? 0 : -ENODEV;
+}
- return ret;
+static int
+nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+{
+ struct nouveau_i2c_port *port = outp->base.edid;
+ if (port && port->func->drv_ctl)
+ return port->func->drv_ctl(port, ln, vs, pe);
+ return port ? 0 : -ENODEV;
}
static int
-nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int lane, int vsw, int pre)
+nv50_pior_dp_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *info, u32 index,
+ struct nouveau_object **pobject)
{
- struct nouveau_i2c_port *port;
- int ret = -EINVAL;
-
- port = nv50_pior_dp_find(disp, outp);
- if (port) {
- if (port->func->drv_ctl)
- ret = port->func->drv_ctl(port, lane, vsw, pre);
- else
- ret = 0;
- }
+ struct nouveau_i2c *i2c = nouveau_i2c(parent);
+ struct nvkm_output_dp *outp;
+ int ret;
- return ret;
+ ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
+ *pobject = nv_object(outp);
+ if (ret)
+ return ret;
+
+ outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
+ outp->base.info.extdev));
+ return 0;
}
-const struct nouveau_dp_func
-nv50_pior_dp_func = {
+struct nvkm_output_dp_impl
+nv50_pior_dp_impl = {
+ .base.base.handle = DCB_OUTPUT_DP | 0x0010,
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_pior_dp_ctor,
+ .dtor = _nvkm_output_dp_dtor,
+ .init = _nvkm_output_dp_init,
+ .fini = _nvkm_output_dp_fini,
+ },
.pattern = nv50_pior_dp_pattern,
+ .lnk_pwr = nv50_pior_dp_lnk_pwr,
.lnk_ctl = nv50_pior_dp_lnk_ctl,
.drv_ctl = nv50_pior_dp_drv_ctl,
};
@@ -102,6 +141,7 @@ nv50_pior_dp_func = {
/******************************************************************************
* General PIOR handling
*****************************************************************************/
+
int
nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
{
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
index cc3c7a4ca747..26e9a42569c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -1,10 +1,42 @@
#ifndef __NVKM_DISP_PRIV_H__
#define __NVKM_DISP_PRIV_H__
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/conn.h>
+
#include <engine/disp.h>
struct nouveau_disp_impl {
struct nouveau_oclass base;
+ struct nouveau_oclass **outp;
+ struct nouveau_oclass **conn;
};
+#define nouveau_disp_create(p,e,c,h,i,x,d) \
+ nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
+ sizeof(**d), (void **)d)
+#define nouveau_disp_destroy(d) ({ \
+ struct nouveau_disp *disp = (d); \
+ _nouveau_disp_dtor(nv_object(disp)); \
+})
+#define nouveau_disp_init(d) ({ \
+ struct nouveau_disp *disp = (d); \
+ _nouveau_disp_init(nv_object(disp)); \
+})
+#define nouveau_disp_fini(d,s) ({ \
+ struct nouveau_disp *disp = (d); \
+ _nouveau_disp_fini(nv_object(disp), (s)); \
+})
+
+int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int heads,
+ const char *, const char *, int, void **);
+void _nouveau_disp_dtor(struct nouveau_object *);
+int _nouveau_disp_init(struct nouveau_object *);
+int _nouveau_disp_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass *nvkm_output_oclass;
+extern struct nouveau_oclass *nvkm_connector_oclass;
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 526b75242899..7a1ebdfa9e1b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,8 +47,12 @@ int
nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
{
struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+ const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
+ const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+ struct nvkm_output *outp = NULL, *temp;
u32 data;
int ret = -EINVAL;
@@ -56,6 +60,13 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
return -EINVAL;
data = *(u32 *)args;
+ list_for_each_entry(temp, &priv->base.outp, head) {
+ if ((temp->info.hasht & 0xff) == type &&
+ (temp->info.hashm & mask) == mask) {
+ outp = temp;
+ break;
+ }
+ }
switch (mthd & ~0x3f) {
case NV50_DISP_SOR_PWR:
@@ -71,6 +82,24 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
ret = 0;
break;
+ case NV94_DISP_SOR_DP_PWR:
+ if (outp) {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ switch (data) {
+ case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+ nouveau_event_put(outpdp->irq);
+ ((struct nvkm_output_dp_impl *)nv_oclass(outp))
+ ->lnk_pwr(outpdp, 0);
+ atomic_set(&outpdp->lt.done, 0);
+ break;
+ case NV94_DISP_SOR_DP_PWR_STATE_ON:
+ nvkm_output_dp_train(&outpdp->base, 0, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ break;
default:
BUG_ON(1);
}
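
The lookup added to nv50_sor_mthd() above exploits the DCB hash layout: head, link and OR are encoded as individual bits of hashm, so building a mask from the method bits and requiring `(hashm & mask) == mask` (plus a type match on the low byte of hasht) pins down which output an SOR method targets; the NV94_DISP_SOR_DP_PWR case then drives that output's lnk_pwr hook or kicks off retraining. A standalone sketch of the match, with a made-up hash value:

#include <stdio.h>

struct outp { unsigned hasht, hashm; };

static int match(const struct outp *o, unsigned type,
		 unsigned head, unsigned link, unsigned or)
{
	unsigned mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
	return (o->hasht & 0xff) == type && (o->hashm & mask) == mask;
}

int main(void)
{
	/* e.g. a DP output on head 0, link 1, OR 2 (hash values invented) */
	struct outp o = { .hasht = 0x06, .hashm = 0x0184 };
	printf("match=%d\n", match(&o, 0x06, 0, 1, 2));
	return 0;
}
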
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index eea3ef59693d..05487cda84a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -29,19 +29,21 @@
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
#include <subdev/bios/init.h>
+#include <subdev/timer.h>
#include "nv50.h"
+#include "outpdp.h"
static inline u32
-nv94_sor_soff(struct dcb_output *outp)
+nv94_sor_soff(struct nvkm_output_dp *outp)
{
- return (ffs(outp->or) - 1) * 0x800;
+ return (ffs(outp->base.info.or) - 1) * 0x800;
}
static inline u32
-nv94_sor_loff(struct dcb_output *outp)
+nv94_sor_loff(struct nvkm_output_dp *outp)
{
- return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+ return nv94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
static inline u32
@@ -55,77 +57,96 @@ nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
}
static int
-nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int pattern)
+nv94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)disp;
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
const u32 loff = nv94_sor_loff(outp);
nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
return 0;
}
+int
+nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = nv94_sor_soff(outp);
+ const u32 loff = nv94_sor_loff(outp);
+ u32 mask = 0, i;
+
+ for (i = 0; i < nr; i++)
+ mask |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
+ nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
+ nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+ return 0;
+}
+
static int
-nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int link_nr, int link_bw, bool enh_frame)
+nv94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nv50_disp_priv *priv = (void *)disp;
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
const u32 soff = nv94_sor_soff(outp);
const u32 loff = nv94_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 lane = 0;
- int i;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enh_frame)
+ dpctrl |= ((1 << nr) - 1) << 16;
+ if (ef)
dpctrl |= 0x00004000;
- if (link_bw > 0x06)
+ if (bw > 0x06)
clksor |= 0x00040000;
- for (i = 0; i < link_nr; i++)
- lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
-
nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
- nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
return 0;
}
static int
-nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int lane, int swing, int preem)
+nv94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
{
- struct nouveau_bios *bios = nouveau_bios(disp);
- struct nv50_disp_priv *priv = (void *)disp;
- const u32 shift = nv94_sor_dp_lane_map(priv, lane);
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 shift = nv94_sor_dp_lane_map(priv, ln);
const u32 loff = nv94_sor_loff(outp);
u32 addr, data[3];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+ outp->base.info.hashm,
&ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
&ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
- data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
- nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
- nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
- nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
+ data[2] = nv_rd32(priv, 0x61c130 + loff);
+ if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+ data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+ nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
return 0;
}
-const struct nouveau_dp_func
-nv94_sor_dp_func = {
+struct nvkm_output_dp_impl
+nv94_sor_dp_impl = {
+ .base.base.handle = DCB_OUTPUT_DP,
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_output_dp_ctor,
+ .dtor = _nvkm_output_dp_dtor,
+ .init = _nvkm_output_dp_init,
+ .fini = _nvkm_output_dp_fini,
+ },
.pattern = nv94_sor_dp_pattern,
+ .lnk_pwr = nv94_sor_dp_lnk_pwr,
.lnk_ctl = nv94_sor_dp_lnk_ctl,
.drv_ctl = nv94_sor_dp_drv_ctl,
};
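
nv94_sor_dp_lnk_pwr() above is the lane-mask logic that used to live inside lnk_ctl, split out so lane power can be managed independently of link configuration: each logical lane maps to a per-board byte offset, and bit (offset >> 3) of 0x61c130 powers that lane. A standalone sketch; the lane-map values here are invented, the real ones come from nv94_sor_dp_lane_map():

#include <stdio.h>

static unsigned lane_map(unsigned lane)
{
	/* invented map for illustration only */
	static const unsigned map[4] = { 0x00, 0x08, 0x10, 0x18 };
	return map[lane & 3];
}

int main(void)
{
	unsigned mask = 0, nr = 2, i;

	for (i = 0; i < nr; i++)
		mask |= 1 << (lane_map(i) >> 3);
	printf("lane power mask %#x\n", mask);	/* 0x3 for two lanes */
	return 0;
}
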
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index d2df572f16a3..97f0e9cd3d40 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -29,19 +29,20 @@
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
#include <subdev/bios/init.h>
+#include <subdev/timer.h>
#include "nv50.h"
static inline u32
-nvd0_sor_soff(struct dcb_output *outp)
+nvd0_sor_soff(struct nvkm_output_dp *outp)
{
- return (ffs(outp->or) - 1) * 0x800;
+ return (ffs(outp->base.info.or) - 1) * 0x800;
}
static inline u32
-nvd0_sor_loff(struct dcb_output *outp)
+nvd0_sor_loff(struct nvkm_output_dp *outp)
{
- return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+ return nvd0_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
static inline u32
@@ -52,77 +53,80 @@ nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
}
static int
-nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int pattern)
+nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)disp;
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
const u32 loff = nvd0_sor_loff(outp);
nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
static int
-nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int link_nr, int link_bw, bool enh_frame)
+nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nv50_disp_priv *priv = (void *)disp;
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
const u32 soff = nvd0_sor_soff(outp);
const u32 loff = nvd0_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
- u32 lane = 0;
- int i;
- clksor |= link_bw << 18;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enh_frame)
+ clksor |= bw << 18;
+ dpctrl |= ((1 << nr) - 1) << 16;
+ if (ef)
dpctrl |= 0x00004000;
- for (i = 0; i < link_nr; i++)
- lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
-
nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
- nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
return 0;
}
static int
-nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
- int head, int lane, int swing, int preem)
+nvd0_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
{
- struct nouveau_bios *bios = nouveau_bios(disp);
- struct nv50_disp_priv *priv = (void *)disp;
- const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 shift = nvd0_sor_dp_lane_map(priv, ln);
const u32 loff = nvd0_sor_loff(outp);
- u32 addr, data[3];
+ u32 addr, data[4];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
- addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+ addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+ outp->base.info.hashm,
&ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
- addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+ addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
&ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
- data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
- nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
- nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
- nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
- nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+ data[2] = nv_rd32(priv, 0x61c130 + loff);
+ if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+ data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+ nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+ data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
+ nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
return 0;
}
-const struct nouveau_dp_func
-nvd0_sor_dp_func = {
+struct nvkm_output_dp_impl
+nvd0_sor_dp_impl = {
+ .base.base.handle = DCB_OUTPUT_DP,
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_output_dp_ctor,
+ .dtor = _nvkm_output_dp_dtor,
+ .init = _nvkm_output_dp_init,
+ .fini = _nvkm_output_dp_fini,
+ },
.pattern = nvd0_sor_dp_pattern,
+ .lnk_pwr = nv94_sor_dp_lnk_pwr,
.lnk_ctl = nvd0_sor_dp_lnk_ctl,
.drv_ctl = nvd0_sor_dp_drv_ctl,
};
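
Two details in the nvd0 drv_ctl rework above: the vswing/pre-emphasis bytes gain a post-cursor companion written to 0x61c13c (hence the fourth data word and the `pc` argument), and the shared tx_pu field in 0x61c130[15:8] is no longer blindly rewritten per lane; it is reset on lane 0 and otherwise only ever raised, so the register ends up holding the maximum across lanes. The tx_pu rule, standalone:

#include <stdio.h>

static unsigned apply_tx_pu(unsigned reg130, unsigned tx_pu, int ln)
{
	/* reset on lane 0, otherwise only ever raise the shared field */
	if ((reg130 & 0x0000ff00) < (tx_pu << 8) || ln == 0)
		reg130 = (reg130 & ~0x0000ff00) | (tx_pu << 8);
	return reg130;
}

int main(void)
{
	unsigned reg = 0;

	reg = apply_tx_pu(reg, 0x20, 0);	/* lane 0 seeds the field */
	reg = apply_tx_pu(reg, 0x10, 1);	/* lower value: kept      */
	reg = apply_tx_pu(reg, 0x30, 2);	/* higher value: raised   */
	printf("%#010x\n", reg);		/* 0x00003000 */
	return 0;
}
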
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 6f9041ced9a2..56ed3d73bf8e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -91,7 +91,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
if (!chan->user)
return -EFAULT;
- nouveau_event_trigger(priv->cevent, 0);
+ nouveau_event_trigger(priv->cevent, 1, 0);
chan->size = size;
return 0;
@@ -194,11 +194,11 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
- ret = nouveau_event_create(1, &priv->cevent);
+ ret = nouveau_event_create(1, 1, &priv->cevent);
if (ret)
return ret;
- ret = nouveau_event_create(1, &priv->uevent);
+ ret = nouveau_event_create(1, 1, &priv->uevent);
if (ret)
return ret;
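
The event-API churn visible here and in the fifo/disp hunks around it is one change: nouveau_event_create() and nouveau_event_trigger() now carry a type argument alongside the index (a count of types at creation, and what the disp code above treats as a bitmask when triggering, e.g. the NVKM_HPD_* flags in outpdp.c), so a single source such as an aux channel can post plug, unplug and irq through one handler. A stand-in sketch of that multiplexing, not the real API:

#include <stdio.h>

#define EV_PLUG   0x01
#define EV_UNPLUG 0x02
#define EV_IRQ    0x04

static void trigger(unsigned types, int index)
{
	/* one trigger call fans out to every type set in the mask */
	if (types & EV_PLUG)   printf("index %d: plug\n", index);
	if (types & EV_UNPLUG) printf("index %d: unplug\n", index);
	if (types & EV_IRQ)    printf("index %d: irq\n", index);
}

int main(void)
{
	trigger(EV_PLUG | EV_IRQ, 0);
	return 0;
}
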
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c
new file mode 100644
index 000000000000..327456eae963
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/gk20a.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nve0.h"
+
+struct nouveau_oclass *
+gk20a_fifo_oclass = &(struct nve0_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0xea),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_ctor,
+ .dtor = nve0_fifo_dtor,
+ .init = nve0_fifo_init,
+ .fini = nve0_fifo_fini,
+ },
+ .channels = 128,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 54f26cc801c7..c61b16a63884 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -539,7 +539,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status & 0x40000000) {
- nouveau_event_trigger(priv->base.uevent, 0);
+ nouveau_event_trigger(priv->base.uevent, 1, 0);
nv_wr32(priv, 0x002100, 0x40000000);
status &= ~0x40000000;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index fe0f41e65d9b..6e5ac16e5460 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -389,14 +389,14 @@ nv84_fifo_cclass = {
******************************************************************************/
static void
-nv84_fifo_uevent_enable(struct nouveau_event *event, int index)
+nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
{
struct nv84_fifo_priv *priv = event->priv;
nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
}
static void
-nv84_fifo_uevent_disable(struct nouveau_event *event, int index)
+nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
{
struct nv84_fifo_priv *priv = event->priv;
nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index fa1e719872b7..ae4a4dc5642a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -730,7 +730,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
- nouveau_event_trigger(priv->base.uevent, 0);
+ nouveau_event_trigger(priv->base.uevent, 1, 0);
ints &= ~1;
}
if (ints) {
@@ -827,14 +827,14 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nvc0_fifo_uevent_enable(struct nouveau_event *event, int index)
+nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
{
struct nvc0_fifo_priv *priv = event->priv;
nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
}
static void
-nvc0_fifo_uevent_disable(struct nouveau_event *event, int index)
+nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
{
struct nvc0_fifo_priv *priv = event->priv;
nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index a9a1a9c9f9f2..298063edb92d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -859,7 +859,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
static void
nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
{
- nouveau_event_trigger(priv->base.uevent, 0);
+ nouveau_event_trigger(priv->base.uevent, 1, 0);
}
static void
@@ -952,14 +952,14 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
+nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
{
struct nve0_fifo_priv *priv = event->priv;
nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
}
static void
-nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
+nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
{
struct nve0_fifo_priv *priv = event->priv;
nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
index 014344ebee66..e96b32bb1bbc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
@@ -8,6 +8,7 @@ int nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nve0_fifo_dtor(struct nouveau_object *);
int nve0_fifo_init(struct nouveau_object *);
+int nve0_fifo_fini(struct nouveau_object *, bool);
struct nve0_fifo_impl {
struct nouveau_oclass base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
new file mode 100644
index 000000000000..224ee0287ab7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ctxnvc0.h"
+
+static const struct nvc0_graph_pack
+gk20a_grctx_pack_mthd[] = {
+ { nve4_grctx_init_a097_0, 0xa297 },
+ { nvc0_grctx_init_902d_0, 0x902d },
+ {}
+};
+
+struct nouveau_oclass *
+gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0xea),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+ .main = nve4_grctx_generate_main,
+ .mods = nve4_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nve4_grctx_pack_hub,
+ .gpc = nve4_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = nve4_grctx_pack_tpc,
+ .ppc = nve4_grctx_pack_ppc,
+ .icmd = nve4_grctx_pack_icmd,
+ .mthd = gk20a_grctx_pack_mthd,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index 48351b4d6d6b..8de4a4291548 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -545,10 +545,12 @@ nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
mmio_list(0x408010, 0x80000000, 0, 0);
mmio_list(0x419004, 0x00000000, 8, 1);
mmio_list(0x419008, 0x00000000, 0, 0);
+ mmio_list(0x4064cc, 0x80000000, 0, 0);
mmio_list(0x408004, 0x00000000, 8, 0);
mmio_list(0x408008, 0x80000030, 0, 0);
mmio_list(0x418808, 0x00000000, 8, 0);
mmio_list(0x41880c, 0x80000030, 0, 0);
+ mmio_list(0x4064c8, 0x00c20200, 0, 0);
mmio_list(0x418810, 0x80000000, 12, 2);
mmio_list(0x419848, 0x10000000, 12, 2);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
index 9c815d1f99ef..8da8b627b9d0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -69,7 +69,9 @@ extern struct nouveau_oclass *nvd7_grctx_oclass;
extern struct nouveau_oclass *nvd9_grctx_oclass;
extern struct nouveau_oclass *nve4_grctx_oclass;
+extern struct nouveau_oclass *gk20a_grctx_oclass;
void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
@@ -151,6 +153,13 @@ extern const struct nvc0_graph_init nve4_grctx_init_gpm_0[];
extern const struct nvc0_graph_init nve4_grctx_init_pes_0[];
+extern const struct nvc0_graph_pack nve4_grctx_pack_hub[];
+extern const struct nvc0_graph_pack nve4_grctx_pack_gpc[];
+extern const struct nvc0_graph_pack nve4_grctx_pack_tpc[];
+extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[];
+extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[];
+extern const struct nvc0_graph_init nve4_grctx_init_a097_0[];
+
extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index 49a14b116a5f..c5b249238587 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -272,13 +272,13 @@ nve4_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nve4_grctx_pack_icmd[] = {
{ nve4_grctx_init_icmd_0 },
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nve4_grctx_init_a097_0[] = {
{ 0x000800, 8, 0x40, 0x00000000 },
{ 0x000804, 8, 0x40, 0x00000000 },
@@ -697,7 +697,7 @@ nve4_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nve4_grctx_pack_hub[] = {
{ nvc0_grctx_init_main_0 },
{ nve4_grctx_init_fe_0 },
@@ -737,7 +737,7 @@ nve4_grctx_init_gpm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nve4_grctx_pack_gpc[] = {
{ nvc0_grctx_init_gpc_unk_0 },
{ nvd9_grctx_init_prop_0 },
@@ -802,7 +802,7 @@ nve4_grctx_init_sm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nve4_grctx_pack_tpc[] = {
{ nvd7_grctx_init_pe_0 },
{ nve4_grctx_init_tex_0 },
@@ -826,7 +826,7 @@ nve4_grctx_init_cbm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nve4_grctx_pack_ppc[] = {
{ nve4_grctx_init_pes_0 },
{ nve4_grctx_init_cbm_0 },
@@ -838,7 +838,7 @@ nve4_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
+void
nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
u32 magic[GPC_MAX][2];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index 0fab95e49f53..dec03f04114d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -842,7 +842,7 @@ nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
u16 magic3 = 0x0648;
magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * (priv->tpc_nr[gpc] - 1);;
+ offset += 0x0324 * (priv->tpc_nr[gpc] - 1);
magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset;
magic[gpc][3] = 0x00000000 | (magic3 << 16);
offset += 0x0324;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 2f7345f7fe07..7445f12b1d9e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -54,7 +54,7 @@ mmio_list_base:
#ifdef INCLUDE_CODE
// reports an exception to the host
//
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
//
error:
push $r14
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
index c8ddb8d71b91..b4ad18bf5a26 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
@@ -49,7 +49,7 @@ hub_mmio_list_next:
#ifdef INCLUDE_CODE
// reports an exception to the host
//
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
//
error:
nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
@@ -343,13 +343,25 @@ ih:
ih_no_ctxsw:
and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
bra e #ih_no_fwmthd
- // none we handle, ack, and fall-through to unhandled
+ // none we handle; report to host and ack
+ nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
+ nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
+ extr $r14 $r15 16:18
+ shl b32 $r14 $r14 2
+ imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
+ add b32 $r14 $r15
+ call(nv_rd32)
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
+ mov $r15 E_BAD_FWMTHD
+ call(error)
mov $r11 0x100
nv_wr32(0x400144, $r11)
// anything we didn't handle, bring it to the host's attention
ih_no_fwmthd:
- mov $r11 0x104 // FIFO | CHSW
+ mov $r11 0x504 // FIFO | CHSW | FWMTHD
not b32 $r11
and $r11 $r10 $r11
bra e #ih_no_other
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
index 214dd16ec566..5f953c5c20b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
@@ -478,10 +478,10 @@ uint32_t gm107_grhub_code[] = {
0x01040080,
0xbd0001f6,
0x01004104,
- 0x627e020f,
- 0x717e0006,
+ 0xa87e020f,
+ 0xb77e0006,
0x100f0006,
- 0x0006b37e,
+ 0x0006f97e,
0x98000e98,
0x207e010f,
0x14950001,
@@ -523,8 +523,8 @@ uint32_t gm107_grhub_code[] = {
0x800040b7,
0xf40132b6,
0x000fb41b,
- 0x0006b37e,
- 0x627e000f,
+ 0x0006f97e,
+ 0xa87e000f,
0x00800006,
0x01f60201,
0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gm107_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0231f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gm107_grhub_code[] = {
0x37008006,
0x0009f602,
0x31f404bd,
- 0x08367e01,
+ 0x087c7e01,
0xf094bd00,
0x00800699,
0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gm107_grhub_code[] = {
0x20f92f0e,
0x32f412b2,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x008020fc,
0x02f602c0,
0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gm107_grhub_code[] = {
0x23c8130e,
0x0d0bf41f,
0xf40131f4,
- 0x367e0232,
+ 0x7c7e0232,
/* 0x054e: chsw_done */
0x01020008,
0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gm107_grhub_code[] = {
0xb0ff2a0e,
0x1bf401e4,
0x7ef2b20c,
- 0xf40007d6,
+ 0xf400081c,
/* 0x057a: main_not_ctx_chan */
0xe4b0400e,
0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gm107_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -642,238 +642,238 @@ uint32_t gm107_grhub_code[] = {
/* 0x061a: ih_no_ctxsw */
0xabe40000,
0x0bf40400,
- 0x01004b10,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd01,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x0642: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x0662: ctx_4170s */
- 0xf5f001f8,
- 0x8effb210,
- 0x7e404170,
- 0xf800008f,
-/* 0x0671: ctx_4170w */
- 0x41708e00,
+ 0x07088e56,
0x00657e40,
- 0xf0ffb200,
- 0x1bf410f4,
-/* 0x0683: ctx_redswitch */
- 0x4e00f8f3,
- 0xe5f00200,
- 0x20e5f040,
- 0x8010e5f0,
- 0xf6018500,
- 0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
- 0xf2b6080f,
- 0xfd1bf401,
- 0x0400e5f1,
- 0x0100e5f1,
- 0x01850080,
- 0xbd000ef6,
-/* 0x06b3: ctx_86c */
- 0x8000f804,
- 0xf6022300,
+ 0x80ffb200,
+ 0xf6020400,
0x04bd000f,
- 0x148effb2,
- 0x8f7e408a,
- 0xffb20000,
- 0x41a88c8e,
+ 0x4007048e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60203,
+ 0xc704bd00,
+ 0xee9450fe,
+ 0x07008f02,
+ 0x00efbb40,
+ 0x0000657e,
+ 0x02020080,
+ 0xbd000ff6,
+ 0x7e030f04,
+ 0x4b0002f8,
+ 0xbfb20100,
+ 0x4001448e,
0x00008f7e,
-/* 0x06d2: ctx_mem */
- 0x008000f8,
- 0x0ff60284,
-/* 0x06db: ctx_mem_wait */
- 0x8f04bd00,
- 0xcf028400,
- 0xfffd00ff,
- 0xf61bf405,
-/* 0x06ea: ctx_load */
- 0x94bd00f8,
- 0x800599f0,
- 0xf6023700,
- 0x04bd0009,
- 0xb87e0c0a,
- 0xf4bd0000,
- 0x02890080,
+/* 0x0674: ih_no_fwmthd */
+ 0xbd05044b,
+ 0xb4abffb0,
+ 0x800c0bf4,
+ 0xf6030700,
+ 0x04bd000b,
+/* 0x0688: ih_no_other */
+ 0xf6010040,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x06a8: ctx_4170s */
+ 0xb210f5f0,
+ 0x41708eff,
+ 0x008f7e40,
+/* 0x06b7: ctx_4170w */
+ 0x8e00f800,
+ 0x7e404170,
+ 0xb2000065,
+ 0x10f4f0ff,
+ 0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+ 0x02004e00,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x85008010,
+ 0x000ef601,
+ 0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x00800100,
+ 0x0ef60185,
+ 0xf804bd00,
+/* 0x06f9: ctx_86c */
+ 0x23008000,
+ 0x000ff602,
+ 0xffb204bd,
+ 0x408a148e,
+ 0x00008f7e,
+ 0x8c8effb2,
+ 0x8f7e41a8,
+ 0x00f80000,
+/* 0x0718: ctx_mem */
+ 0x02840080,
0xbd000ff6,
- 0xc1008004,
- 0x0002f602,
- 0x008004bd,
- 0x02f60283,
- 0x0f04bd00,
- 0x06d27e07,
- 0xc0008000,
- 0x0002f602,
- 0x0bfe04bd,
- 0x1f2af000,
- 0xb60424b6,
- 0x94bd0220,
- 0x800899f0,
- 0xf6023700,
- 0x04bd0009,
- 0x02810080,
- 0xbd0002f6,
- 0x0000d204,
- 0x25f08000,
- 0x88008002,
- 0x0002f602,
- 0x100104bd,
- 0xf0020042,
- 0x12fa0223,
- 0xbd03f805,
- 0x0899f094,
- 0x02170080,
- 0xbd0009f6,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd1601b5,
- 0x0999f094,
- 0x02370080,
- 0xbd0009f6,
- 0x81008004,
- 0x0001f602,
- 0x010204bd,
- 0x02880080,
+/* 0x0721: ctx_mem_wait */
+ 0x84008f04,
+ 0x00ffcf02,
+ 0xf405fffd,
+ 0x00f8f61b,
+/* 0x0730: ctx_load */
+ 0x99f094bd,
+ 0x37008005,
+ 0x0009f602,
+ 0x0c0a04bd,
+ 0x0000b87e,
+ 0x0080f4bd,
+ 0x0ff60289,
+ 0x8004bd00,
+ 0xf602c100,
+ 0x04bd0002,
+ 0x02830080,
0xbd0002f6,
- 0x01004104,
- 0xfa0613f0,
- 0x03f80501,
+ 0x7e070f04,
+ 0x80000718,
+ 0xf602c000,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
0x99f094bd,
- 0x17008009,
+ 0x37008008,
0x0009f602,
- 0x94bd04bd,
- 0x800599f0,
+ 0x008004bd,
+ 0x02f60281,
+ 0xd204bd00,
+ 0x80000000,
+ 0x800225f0,
+ 0xf6028800,
+ 0x04bd0002,
+ 0x00421001,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
+ 0x00800899,
+ 0x09f60217,
+ 0x9804bd00,
+ 0x14b68101,
+ 0x80029818,
+ 0xfd0825b6,
+ 0x01b50512,
+ 0xf094bd16,
+ 0x00800999,
+ 0x09f60237,
+ 0x8004bd00,
+ 0xf6028100,
+ 0x04bd0001,
+ 0x00800102,
+ 0x02f60288,
+ 0x4104bd00,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0x800999f0,
0xf6021700,
0x04bd0009,
-/* 0x07d6: ctx_chan */
- 0xea7e00f8,
- 0x0c0a0006,
- 0x0000b87e,
- 0xd27e050f,
- 0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
- 0x80410398,
+ 0x99f094bd,
+ 0x17008005,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x081c: ctx_chan */
+ 0x0007307e,
+ 0xb87e0c0a,
+ 0x050f0000,
+ 0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+ 0x039800f8,
+ 0x81008041,
+ 0x0003f602,
+ 0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+ 0xf4ff34c4,
+ 0x00450e1b,
+ 0x0653f002,
+ 0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+ 0x804e9803,
+ 0x7e814f98,
+ 0xb600008f,
+ 0x12b60830,
+ 0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+ 0x80160398,
0xf6028100,
0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
- 0x34c434bd,
- 0x0e1bf4ff,
- 0xf0020045,
- 0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
- 0x9803f805,
- 0x4f98804e,
- 0x008f7e81,
- 0x0830b600,
- 0xf40112b6,
-/* 0x081a: ctx_mmio_done */
- 0x0398df1b,
- 0x81008016,
- 0x0003f602,
- 0x00b504bd,
- 0x01004140,
- 0xfa0613f0,
- 0x03f80601,
-/* 0x0836: ctx_xfer */
- 0x040e00f8,
- 0x03020080,
- 0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
- 0x00008e04,
- 0x00eecf03,
- 0x2000e4f1,
- 0xf4f51bf4,
- 0x02f40611,
-/* 0x0855: ctx_xfer_pre */
- 0x7e100f0c,
- 0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
- 0x020f1b11,
- 0x0006627e,
- 0x0006717e,
- 0x0006837e,
- 0x627ef4bd,
- 0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
- 0x01980006,
- 0x8024bd16,
- 0xf6010500,
- 0x04bd0002,
- 0x008e1fb2,
- 0x8f7e41a5,
- 0xfcf00000,
- 0x022cf001,
- 0xfd0124b6,
- 0xffb205f2,
- 0x41a5048e,
+ 0x414000b5,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x087c: ctx_xfer */
+ 0x0080040e,
+ 0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+ 0x8e04bd00,
+ 0xcf030000,
+ 0xe4f100ee,
+ 0x1bf42000,
+ 0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+ 0x0f0c02f4,
+ 0x06f97e10,
+ 0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+ 0xa87e020f,
+ 0xb77e0006,
+ 0xc97e0006,
+ 0xf4bd0006,
+ 0x0006a87e,
+ 0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+ 0xbd160198,
+ 0x05008024,
+ 0x0002f601,
+ 0x1fb204bd,
+ 0x41a5008e,
0x00008f7e,
- 0x0002167e,
- 0xfc8024bd,
- 0x02f60247,
- 0xf004bd00,
- 0x20b6012c,
- 0x4afc8003,
- 0x0002f602,
- 0xacf004bd,
- 0x06a5f001,
- 0x0c98000b,
- 0x010d9800,
- 0x3d7e000e,
- 0x080a0001,
- 0x0000ec7e,
- 0x00020a7e,
- 0x0a1201f4,
- 0x00b87e0c,
- 0x7e050f00,
- 0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
- 0x020f2d02,
- 0x0006627e,
- 0xb37ef4bd,
- 0x277e0006,
- 0x717e0002,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0x048effb2,
+ 0x8f7e41a5,
+ 0x167e0000,
+ 0x24bd0002,
+ 0x0247fc80,
+ 0xbd0002f6,
+ 0x012cf004,
+ 0x800320b6,
+ 0xf6024afc,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0x000b06a5,
+ 0x98000c98,
+ 0x000e010d,
+ 0x00013d7e,
+ 0xec7e080a,
+ 0x0a7e0000,
+ 0x01f40002,
+ 0x7e0c0a12,
+ 0x0f0000b8,
+ 0x07187e05,
+ 0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+ 0xa87e020f,
0xf4bd0006,
- 0x0006627e,
- 0x981011f4,
- 0x11fd4001,
- 0x070bf405,
- 0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0006f97e,
+ 0x0002277e,
+ 0x0006b77e,
+ 0xa87ef4bd,
+ 0x11f40006,
+ 0x40019810,
+ 0xf40511fd,
+ 0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+ 0x00f80008,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
index 64dfd75192bf..e49b5a877ae4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -478,10 +478,10 @@ uint32_t nv108_grhub_code[] = {
0x01040080,
0xbd0001f6,
0x01004104,
- 0x627e020f,
- 0x717e0006,
+ 0xa87e020f,
+ 0xb77e0006,
0x100f0006,
- 0x0006b37e,
+ 0x0006f97e,
0x98000e98,
0x207e010f,
0x14950001,
@@ -523,8 +523,8 @@ uint32_t nv108_grhub_code[] = {
0x800040b7,
0xf40132b6,
0x000fb41b,
- 0x0006b37e,
- 0x627e000f,
+ 0x0006f97e,
+ 0xa87e000f,
0x00800006,
0x01f60201,
0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t nv108_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0231f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -563,7 +563,7 @@ uint32_t nv108_grhub_code[] = {
0x37008006,
0x0009f602,
0x31f404bd,
- 0x08367e01,
+ 0x087c7e01,
0xf094bd00,
0x00800699,
0x09f60217,
@@ -572,7 +572,7 @@ uint32_t nv108_grhub_code[] = {
0x20f92f0e,
0x32f412b2,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x008020fc,
0x02f602c0,
0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t nv108_grhub_code[] = {
0x23c8130e,
0x0d0bf41f,
0xf40131f4,
- 0x367e0232,
+ 0x7c7e0232,
/* 0x054e: chsw_done */
0x01020008,
0x02c30080,
@@ -593,7 +593,7 @@ uint32_t nv108_grhub_code[] = {
0xb0ff2a0e,
0x1bf401e4,
0x7ef2b20c,
- 0xf40007d6,
+ 0xf400081c,
/* 0x057a: main_not_ctx_chan */
0xe4b0400e,
0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t nv108_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -642,238 +642,238 @@ uint32_t nv108_grhub_code[] = {
/* 0x061a: ih_no_ctxsw */
0xabe40000,
0x0bf40400,
- 0x01004b10,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd01,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x0642: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x0662: ctx_4170s */
- 0xf5f001f8,
- 0x8effb210,
- 0x7e404170,
- 0xf800008f,
-/* 0x0671: ctx_4170w */
- 0x41708e00,
+ 0x07088e56,
0x00657e40,
- 0xf0ffb200,
- 0x1bf410f4,
-/* 0x0683: ctx_redswitch */
- 0x4e00f8f3,
- 0xe5f00200,
- 0x20e5f040,
- 0x8010e5f0,
- 0xf6018500,
- 0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
- 0xf2b6080f,
- 0xfd1bf401,
- 0x0400e5f1,
- 0x0100e5f1,
- 0x01850080,
- 0xbd000ef6,
-/* 0x06b3: ctx_86c */
- 0x8000f804,
- 0xf6022300,
+ 0x80ffb200,
+ 0xf6020400,
0x04bd000f,
- 0x148effb2,
- 0x8f7e408a,
- 0xffb20000,
- 0x41a88c8e,
+ 0x4007048e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60203,
+ 0xc704bd00,
+ 0xee9450fe,
+ 0x07008f02,
+ 0x00efbb40,
+ 0x0000657e,
+ 0x02020080,
+ 0xbd000ff6,
+ 0x7e030f04,
+ 0x4b0002f8,
+ 0xbfb20100,
+ 0x4001448e,
0x00008f7e,
-/* 0x06d2: ctx_mem */
- 0x008000f8,
- 0x0ff60284,
-/* 0x06db: ctx_mem_wait */
- 0x8f04bd00,
- 0xcf028400,
- 0xfffd00ff,
- 0xf61bf405,
-/* 0x06ea: ctx_load */
- 0x94bd00f8,
- 0x800599f0,
- 0xf6023700,
- 0x04bd0009,
- 0xb87e0c0a,
- 0xf4bd0000,
- 0x02890080,
+/* 0x0674: ih_no_fwmthd */
+ 0xbd05044b,
+ 0xb4abffb0,
+ 0x800c0bf4,
+ 0xf6030700,
+ 0x04bd000b,
+/* 0x0688: ih_no_other */
+ 0xf6010040,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x06a8: ctx_4170s */
+ 0xb210f5f0,
+ 0x41708eff,
+ 0x008f7e40,
+/* 0x06b7: ctx_4170w */
+ 0x8e00f800,
+ 0x7e404170,
+ 0xb2000065,
+ 0x10f4f0ff,
+ 0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+ 0x02004e00,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x85008010,
+ 0x000ef601,
+ 0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x00800100,
+ 0x0ef60185,
+ 0xf804bd00,
+/* 0x06f9: ctx_86c */
+ 0x23008000,
+ 0x000ff602,
+ 0xffb204bd,
+ 0x408a148e,
+ 0x00008f7e,
+ 0x8c8effb2,
+ 0x8f7e41a8,
+ 0x00f80000,
+/* 0x0718: ctx_mem */
+ 0x02840080,
0xbd000ff6,
- 0xc1008004,
- 0x0002f602,
- 0x008004bd,
- 0x02f60283,
- 0x0f04bd00,
- 0x06d27e07,
- 0xc0008000,
- 0x0002f602,
- 0x0bfe04bd,
- 0x1f2af000,
- 0xb60424b6,
- 0x94bd0220,
- 0x800899f0,
- 0xf6023700,
- 0x04bd0009,
- 0x02810080,
- 0xbd0002f6,
- 0x0000d204,
- 0x25f08000,
- 0x88008002,
- 0x0002f602,
- 0x100104bd,
- 0xf0020042,
- 0x12fa0223,
- 0xbd03f805,
- 0x0899f094,
- 0x02170080,
- 0xbd0009f6,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd1601b5,
- 0x0999f094,
- 0x02370080,
- 0xbd0009f6,
- 0x81008004,
- 0x0001f602,
- 0x010204bd,
- 0x02880080,
+/* 0x0721: ctx_mem_wait */
+ 0x84008f04,
+ 0x00ffcf02,
+ 0xf405fffd,
+ 0x00f8f61b,
+/* 0x0730: ctx_load */
+ 0x99f094bd,
+ 0x37008005,
+ 0x0009f602,
+ 0x0c0a04bd,
+ 0x0000b87e,
+ 0x0080f4bd,
+ 0x0ff60289,
+ 0x8004bd00,
+ 0xf602c100,
+ 0x04bd0002,
+ 0x02830080,
0xbd0002f6,
- 0x01004104,
- 0xfa0613f0,
- 0x03f80501,
+ 0x7e070f04,
+ 0x80000718,
+ 0xf602c000,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
0x99f094bd,
- 0x17008009,
+ 0x37008008,
0x0009f602,
- 0x94bd04bd,
- 0x800599f0,
+ 0x008004bd,
+ 0x02f60281,
+ 0xd204bd00,
+ 0x80000000,
+ 0x800225f0,
+ 0xf6028800,
+ 0x04bd0002,
+ 0x00421001,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
+ 0x00800899,
+ 0x09f60217,
+ 0x9804bd00,
+ 0x14b68101,
+ 0x80029818,
+ 0xfd0825b6,
+ 0x01b50512,
+ 0xf094bd16,
+ 0x00800999,
+ 0x09f60237,
+ 0x8004bd00,
+ 0xf6028100,
+ 0x04bd0001,
+ 0x00800102,
+ 0x02f60288,
+ 0x4104bd00,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0x800999f0,
0xf6021700,
0x04bd0009,
-/* 0x07d6: ctx_chan */
- 0xea7e00f8,
- 0x0c0a0006,
- 0x0000b87e,
- 0xd27e050f,
- 0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
- 0x80410398,
+ 0x99f094bd,
+ 0x17008005,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x081c: ctx_chan */
+ 0x0007307e,
+ 0xb87e0c0a,
+ 0x050f0000,
+ 0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+ 0x039800f8,
+ 0x81008041,
+ 0x0003f602,
+ 0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+ 0xf4ff34c4,
+ 0x00450e1b,
+ 0x0653f002,
+ 0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+ 0x804e9803,
+ 0x7e814f98,
+ 0xb600008f,
+ 0x12b60830,
+ 0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+ 0x80160398,
0xf6028100,
0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
- 0x34c434bd,
- 0x0e1bf4ff,
- 0xf0020045,
- 0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
- 0x9803f805,
- 0x4f98804e,
- 0x008f7e81,
- 0x0830b600,
- 0xf40112b6,
-/* 0x081a: ctx_mmio_done */
- 0x0398df1b,
- 0x81008016,
- 0x0003f602,
- 0x00b504bd,
- 0x01004140,
- 0xfa0613f0,
- 0x03f80601,
-/* 0x0836: ctx_xfer */
- 0x040e00f8,
- 0x03020080,
- 0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
- 0x00008e04,
- 0x00eecf03,
- 0x2000e4f1,
- 0xf4f51bf4,
- 0x02f40611,
-/* 0x0855: ctx_xfer_pre */
- 0x7e100f0c,
- 0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
- 0x020f1b11,
- 0x0006627e,
- 0x0006717e,
- 0x0006837e,
- 0x627ef4bd,
- 0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
- 0x01980006,
- 0x8024bd16,
- 0xf6010500,
- 0x04bd0002,
- 0x008e1fb2,
- 0x8f7e41a5,
- 0xfcf00000,
- 0x022cf001,
- 0xfd0124b6,
- 0xffb205f2,
- 0x41a5048e,
+ 0x414000b5,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x087c: ctx_xfer */
+ 0x0080040e,
+ 0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+ 0x8e04bd00,
+ 0xcf030000,
+ 0xe4f100ee,
+ 0x1bf42000,
+ 0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+ 0x0f0c02f4,
+ 0x06f97e10,
+ 0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+ 0xa87e020f,
+ 0xb77e0006,
+ 0xc97e0006,
+ 0xf4bd0006,
+ 0x0006a87e,
+ 0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+ 0xbd160198,
+ 0x05008024,
+ 0x0002f601,
+ 0x1fb204bd,
+ 0x41a5008e,
0x00008f7e,
- 0x0002167e,
- 0xfc8024bd,
- 0x02f60247,
- 0xf004bd00,
- 0x20b6012c,
- 0x4afc8003,
- 0x0002f602,
- 0xacf004bd,
- 0x06a5f001,
- 0x0c98000b,
- 0x010d9800,
- 0x3d7e000e,
- 0x080a0001,
- 0x0000ec7e,
- 0x00020a7e,
- 0x0a1201f4,
- 0x00b87e0c,
- 0x7e050f00,
- 0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
- 0x020f2d02,
- 0x0006627e,
- 0xb37ef4bd,
- 0x277e0006,
- 0x717e0002,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0x048effb2,
+ 0x8f7e41a5,
+ 0x167e0000,
+ 0x24bd0002,
+ 0x0247fc80,
+ 0xbd0002f6,
+ 0x012cf004,
+ 0x800320b6,
+ 0xf6024afc,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0x000b06a5,
+ 0x98000c98,
+ 0x000e010d,
+ 0x00013d7e,
+ 0xec7e080a,
+ 0x0a7e0000,
+ 0x01f40002,
+ 0x7e0c0a12,
+ 0x0f0000b8,
+ 0x07187e05,
+ 0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+ 0xa87e020f,
0xf4bd0006,
- 0x0006627e,
- 0x981011f4,
- 0x11fd4001,
- 0x070bf405,
- 0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0006f97e,
+ 0x0002277e,
+ 0x0006b77e,
+ 0xa87ef4bd,
+ 0x11f40006,
+ 0x40019810,
+ 0xf40511fd,
+ 0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+ 0x00f80008,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index f8f7b278a13f..92dfe6a4ac87 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvc0_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0xb521f502,
- 0xc721f507,
- 0x10f7f007,
- 0x081421f5,
+ 0x0d21f502,
+ 0x1f21f508,
+ 0x10f7f008,
+ 0x086c21f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvc0_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x081421f5,
+ 0x086c21f5,
0xf500f7f0,
- 0xf107b521,
+ 0xf1080d21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvc0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvc0_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x09e821f5,
+ 0x0a4021f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvc0_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09e821,
+ 0xfc0a4021,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvc0_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09e821f5,
+ 0x0a4021f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvc0_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x7821f502,
+ 0xd021f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvc0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvc0_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvc0_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
0xf001f800,
0xffb901f7,
0x60e7f102,
0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
0xf19d21f4,
0xf04160e7,
0x21f440e3,
0x02ffb968,
0xf404ffc8,
0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
0xffb9f4bd,
0x60e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
0x10f5f000,
0xf102ffb9,
0xf04170e7,
0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
0xf100f89d,
0xf04170e7,
0x21f440e3,
0x02ffb968,
0xf410f4f0,
0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
0x0200e7f1,
0xf040e5f0,
0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvc0_grhub_code[] = {
0x0103f085,
0xbd000ed0,
0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvc0_grhub_code[] = {
0x03f08500,
0x000ed001,
0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
0x1b0007f1,
0xd00203f0,
0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvc0_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
0x840007f1,
0xd00203f0,
0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
0x8400f7f1,
0xcf02f3f0,
0xfffd00ff,
0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvc0_grhub_code[] = {
0x02d00203,
0xf004bd00,
0x21f507f7,
- 0x07f1083c,
+ 0x07f10894,
0x03f0c000,
0x0002d002,
0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvc0_grhub_code[] = {
0x03f01700,
0x0009d002,
0x00f804bd,
-/* 0x0978: ctx_chan */
- 0x077f21f5,
- 0x085a21f5,
+/* 0x09d0: ctx_chan */
+ 0x07d721f5,
+ 0x08b221f5,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
- 0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+ 0x9421f505,
+ 0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
0x9800f807,
0x07f14103,
0x03f08100,
0x0003d002,
0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
0xf1160398,
0xf0810007,
0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvc0_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
0xf104e7f0,
0xf0020007,
0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
0xf104bd00,
0xf00000e7,
0xeecf03e3,
0x00e4f100,
0xf21bf420,
0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
0xf7f01102,
- 0x1421f510,
- 0x7f21f508,
+ 0x6c21f510,
+ 0xd721f508,
0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
0xf502f7f0,
- 0xf507b521,
- 0xf507c721,
- 0xbd07dc21,
- 0xb521f5f4,
- 0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+ 0xf5080d21,
+ 0xf5081f21,
+ 0xbd083421,
+ 0x0d21f5f4,
+ 0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
0x16019808,
0x07f124bd,
0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvc0_grhub_code[] = {
0x1301f402,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
+ 0x9421f505,
0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
0xf502f7f0,
- 0xbd07b521,
- 0x1421f5f4,
+ 0xbd080d21,
+ 0x6c21f5f4,
0x7f21f508,
- 0xc721f502,
- 0xf5f4bd07,
- 0xf407b521,
+ 0x1f21f502,
+ 0xf5f4bd08,
+ 0xf4080d21,
0x01981011,
0x0511fd40,
0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
- 0xf5099321,
-/* 0x0af3: ctx_xfer_done */
- 0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+ 0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+ 0xf807fc21,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index 624215a005b0..62b0c7601d8b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvd7_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0xb521f502,
- 0xc721f507,
- 0x10f7f007,
- 0x081421f5,
+ 0x0d21f502,
+ 0x1f21f508,
+ 0x10f7f008,
+ 0x086c21f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvd7_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x081421f5,
+ 0x086c21f5,
0xf500f7f0,
- 0xf107b521,
+ 0xf1080d21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvd7_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvd7_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x09e821f5,
+ 0x0a4021f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvd7_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09e821,
+ 0xfc0a4021,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvd7_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09e821f5,
+ 0x0a4021f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvd7_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x7821f502,
+ 0xd021f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvd7_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvd7_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvd7_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
0xf001f800,
0xffb901f7,
0x60e7f102,
0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
0xf19d21f4,
0xf04160e7,
0x21f440e3,
0x02ffb968,
0xf404ffc8,
0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
0xffb9f4bd,
0x60e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
0x10f5f000,
0xf102ffb9,
0xf04170e7,
0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
0xf100f89d,
0xf04170e7,
0x21f440e3,
0x02ffb968,
0xf410f4f0,
0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
0x0200e7f1,
0xf040e5f0,
0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvd7_grhub_code[] = {
0x0103f085,
0xbd000ed0,
0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvd7_grhub_code[] = {
0x03f08500,
0x000ed001,
0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
0x1b0007f1,
0xd00203f0,
0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvd7_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
0x840007f1,
0xd00203f0,
0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
0x8400f7f1,
0xcf02f3f0,
0xfffd00ff,
0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvd7_grhub_code[] = {
0x02d00203,
0xf004bd00,
0x21f507f7,
- 0x07f1083c,
+ 0x07f10894,
0x03f0c000,
0x0002d002,
0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvd7_grhub_code[] = {
0x03f01700,
0x0009d002,
0x00f804bd,
-/* 0x0978: ctx_chan */
- 0x077f21f5,
- 0x085a21f5,
+/* 0x09d0: ctx_chan */
+ 0x07d721f5,
+ 0x08b221f5,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
- 0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+ 0x9421f505,
+ 0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
0x9800f807,
0x07f14103,
0x03f08100,
0x0003d002,
0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
0xf1160398,
0xf0810007,
0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvd7_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
0xf104e7f0,
0xf0020007,
0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
0xf104bd00,
0xf00000e7,
0xeecf03e3,
0x00e4f100,
0xf21bf420,
0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
0xf7f01102,
- 0x1421f510,
- 0x7f21f508,
+ 0x6c21f510,
+ 0xd721f508,
0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
0xf502f7f0,
- 0xf507b521,
- 0xf507c721,
- 0xbd07dc21,
- 0xb521f5f4,
- 0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+ 0xf5080d21,
+ 0xf5081f21,
+ 0xbd083421,
+ 0x0d21f5f4,
+ 0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
0x16019808,
0x07f124bd,
0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvd7_grhub_code[] = {
0x1301f402,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
+ 0x9421f505,
0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
0xf502f7f0,
- 0xbd07b521,
- 0x1421f5f4,
+ 0xbd080d21,
+ 0x6c21f5f4,
0x7f21f508,
- 0xc721f502,
- 0xf5f4bd07,
- 0xf407b521,
+ 0x1f21f502,
+ 0xf5f4bd08,
+ 0xf4080d21,
0x01981011,
0x0511fd40,
0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
- 0xf5099321,
-/* 0x0af3: ctx_xfer_done */
- 0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+ 0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+ 0xf807fc21,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index 6547b3dfc7ed..51c3797d8537 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nve0_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0x7f21f502,
- 0x9121f507,
+ 0xd721f502,
+ 0xe921f507,
0x10f7f007,
- 0x07de21f5,
+ 0x083621f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nve0_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x07de21f5,
+ 0x083621f5,
0xf500f7f0,
- 0xf1077f21,
+ 0xf107d721,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nve0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nve0_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x09aa21f5,
+ 0x0a0221f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nve0_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09aa21,
+ 0xfc0a0221,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nve0_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09aa21f5,
+ 0x0a0221f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nve0_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x4221f502,
+ 0x9a21f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nve0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nve0_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nve0_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
0xf001f800,
0xffb910f5,
0x70e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
0x70e7f100,
0x40e3f041,
0xb96821f4,
0xf4f002ff,
0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
0xe7f100f8,
0xe5f00200,
0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nve0_grhub_code[] = {
0xf0850007,
0x0ed00103,
0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
0xf2b608f7,
0xfd1bf401,
0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nve0_grhub_code[] = {
0x850007f1,
0xd00103f0,
0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
0x07f100f8,
0x03f01b00,
0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nve0_grhub_code[] = {
0xe7f102ff,
0xe3f0a86c,
0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
0x07f100f8,
0x03f08400,
0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
0xf7f104bd,
0xf3f08400,
0x00ffcf02,
0xf405fffd,
0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f00f,
@@ -797,7 +819,7 @@ uint32_t nve0_grhub_code[] = {
0x0203f083,
0xbd0002d0,
0x07f7f004,
- 0x080621f5,
+ 0x085e21f5,
0xc00007f1,
0xd00203f0,
0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nve0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
0x21f500f8,
- 0xa7f00824,
+ 0xa7f0087c,
0xd021f40c,
0xf505f7f0,
- 0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+ 0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
0x41039800,
0x810007f1,
0xd00203f0,
0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
0x0830b69d,
0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
0x0398df1b,
0x0007f116,
0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nve0_grhub_code[] = {
0x010017f1,
0xfa0613f0,
0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
0xe7f000f8,
0x0007f104,
0x0303f002,
0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
0x00e7f104,
0x03e3f000,
0xf100eecf,
0xf42000e4,
0x11f4f21b,
0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
0xf510f7f0,
- 0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+ 0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
0xf7f01c11,
- 0x7f21f502,
- 0x9121f507,
- 0xa621f507,
+ 0xd721f502,
+ 0xe921f507,
+ 0xfe21f507,
0xf5f4bd07,
- 0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
- 0x98082421,
+ 0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+ 0x98087c21,
0x24bd1601,
0x050007f1,
0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nve0_grhub_code[] = {
0xa7f01301,
0xd021f40c,
0xf505f7f0,
- 0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+ 0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
0xf7f02e02,
- 0x7f21f502,
+ 0xd721f502,
0xf5f4bd07,
- 0xf507de21,
+ 0xf5083621,
0xf5027f21,
- 0xbd079121,
- 0x7f21f5f4,
+ 0xbd07e921,
+ 0xd721f5f4,
0x1011f407,
0xfd400198,
0x0bf40511,
- 0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+ 0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
0x0000f809,
0x00000000,
0x00000000,
@@ -977,4 +999,46 @@ uint32_t nve0_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index a5aee5a4302f..a0af4b703a8e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvf0_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0x7f21f502,
- 0x9121f507,
+ 0xd721f502,
+ 0xe921f507,
0x10f7f007,
- 0x07de21f5,
+ 0x083621f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvf0_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x07de21f5,
+ 0x083621f5,
0xf500f7f0,
- 0xf1077f21,
+ 0xf107d721,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvf0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvf0_grhub_code[] = {
0x0203f037,
0xbd0009d0,
0x0131f404,
- 0x09aa21f5,
+ 0x0a0221f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvf0_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09aa21,
+ 0xfc0a0221,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvf0_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09aa21f5,
+ 0x0a0221f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvf0_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x4221f502,
+ 0x9a21f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvf0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvf0_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nvf0_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
0xf001f800,
0xffb910f5,
0x70e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
0x70e7f100,
0x40e3f041,
0xb96821f4,
0xf4f002ff,
0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
0xe7f100f8,
0xe5f00200,
0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nvf0_grhub_code[] = {
0xf0850007,
0x0ed00103,
0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
0xf2b608f7,
0xfd1bf401,
0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nvf0_grhub_code[] = {
0x850007f1,
0xd00103f0,
0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
0x07f100f8,
0x03f02300,
0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nvf0_grhub_code[] = {
0xe7f102ff,
0xe3f0a88c,
0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
0x07f100f8,
0x03f08400,
0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
0xf7f104bd,
0xf3f08400,
0x00ffcf02,
0xf405fffd,
0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f037,
@@ -797,7 +819,7 @@ uint32_t nvf0_grhub_code[] = {
0x0203f083,
0xbd0002d0,
0x07f7f004,
- 0x080621f5,
+ 0x085e21f5,
0xc00007f1,
0xd00203f0,
0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nvf0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
0x21f500f8,
- 0xa7f00824,
+ 0xa7f0087c,
0xd021f40c,
0xf505f7f0,
- 0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+ 0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
0x41039800,
0x810007f1,
0xd00203f0,
0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
0x0830b69d,
0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
0x0398df1b,
0x0007f116,
0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nvf0_grhub_code[] = {
0x010017f1,
0xfa0613f0,
0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
0xe7f000f8,
0x0007f104,
0x0303f002,
0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
0x00e7f104,
0x03e3f000,
0xf100eecf,
0xf42000e4,
0x11f4f21b,
0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
0xf510f7f0,
- 0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+ 0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
0xf7f01c11,
- 0x7f21f502,
- 0x9121f507,
- 0xa621f507,
+ 0xd721f502,
+ 0xe921f507,
+ 0xfe21f507,
0xf5f4bd07,
- 0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
- 0x98082421,
+ 0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+ 0x98087c21,
0x24bd1601,
0x050007f1,
0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nvf0_grhub_code[] = {
0xa7f01301,
0xd021f40c,
0xf505f7f0,
- 0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+ 0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
0xf7f02e02,
- 0x7f21f502,
+ 0xd721f502,
0xf5f4bd07,
- 0xf507de21,
+ 0xf5083621,
0xf5027f21,
- 0xbd079121,
- 0x7f21f5f4,
+ 0xbd07e921,
+ 0xd721f5f4,
0x1011f407,
0xfd400198,
0x0bf40511,
- 0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+ 0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
0x0000f809,
0x00000000,
0x00000000,
@@ -977,4 +999,46 @@ uint32_t nvf0_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index a47d49db5232..2a0b0f844299 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -30,6 +30,12 @@
#define GK110 0xf0
#define GK208 0x108
+#define NV_PGRAPH_TRAPPED_ADDR 0x400704
+#define NV_PGRAPH_TRAPPED_DATA_LO 0x400708
+#define NV_PGRAPH_TRAPPED_DATA_HI 0x40070c
+
+#define NV_PGRAPH_FE_OBJECT_TABLE(n) ((n) * 4 + 0x400700)
+
#define NV_PGRAPH_FECS_INTR_ACK 0x409004
#define NV_PGRAPH_FECS_INTR 0x409008
#define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400
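The three NV_PGRAPH_TRAPPED_* registers and the NV_PGRAPH_FE_OBJECT_TABLE() macro added here describe the registers the hub firmware reads to identify a trapped method. Expanding the macro by hand, for illustration only (helper name hypothetical, not from the patch):

	/* NV_PGRAPH_FE_OBJECT_TABLE(n): one 32-bit slot per subchannel,
	 * starting at 0x400700 -- n = 0 -> 0x400700, n = 1 -> 0x400704, ... */
	static inline u32 fe_object_table_addr(int n)
	{
		return n * 4 + 0x400700;
	}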
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
index fd1d380de094..1718ae4e8224 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
@@ -3,5 +3,6 @@
#define E_BAD_COMMAND 0x00000001
#define E_CMD_OVERFLOW 0x00000002
+#define E_BAD_FWMTHD 0x00000003
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
new file mode 100644
index 000000000000..83048a56430d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvc0.h"
+#include "ctxnvc0.h"
+
+static struct nouveau_oclass
+gk20a_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0xa040, &nouveau_object_ofuncs },
+ { 0xa297, &nouveau_object_ofuncs },
+ { 0xa0c0, &nouveau_object_ofuncs },
+ {}
+};
+
+struct nouveau_oclass *
+gk20a_graph_oclass = &(struct nvc0_graph_oclass) {
+ .base.handle = NV_ENGINE(GR, 0xea),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_ctor,
+ .dtor = nvc0_graph_dtor,
+ .init = nve4_graph_init,
+ .fini = nve4_graph_fini,
+ },
+ .cclass = &gk20a_grctx_oclass,
+ .sclass = gk20a_graph_sclass,
+ .mmio = nve4_graph_pack_mmio,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 2c7809e1a09b..20665c21d80e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -901,7 +901,7 @@ nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nvaf_graph_sclass;
break;
- };
+ }
/* unfortunate hw bug workaround... */
if (nv_device(priv)->chipset != 0x50 &&
@@ -976,7 +976,6 @@ nv50_graph_init(struct nouveau_object *object)
break;
case 0xa0:
default:
- nv_wr32(priv, 0x402cc0, 0x00000000);
if (nv_device(priv)->chipset == 0xa0 ||
nv_device(priv)->chipset == 0xaa ||
nv_device(priv)->chipset == 0xac) {
@@ -991,10 +990,10 @@ nv50_graph_init(struct nouveau_object *object)
/* zero out zcull regions */
for (i = 0; i < 8; i++) {
- nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
- nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
- nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
- nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+ nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
+ nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
+ nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
+ nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
}
return 0;
}
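The zcull hunk above corrects the register stride: each region consists of four 32-bit words (0x402c20 through 0x402c2c), so consecutive regions start 0x10 bytes apart, not 8 -- with the old stride, region 1 would have overwritten the tail of region 0. An illustrative helper for the addressing (name hypothetical):

	/* region i, word w (0..3): 0x10-byte stride per region */
	static inline u32 zcull_reg(int i, int w)
	{
		return 0x402c20 + i * 0x10 + w * 4;
	}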
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index f3c7329da0a0..aa0838916354 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -789,17 +789,40 @@ nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
static void
nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
{
- u32 ustat = nv_rd32(priv, 0x409c18);
+ u32 stat = nv_rd32(priv, 0x409c18);
- if (ustat & 0x00000001)
- nv_error(priv, "CTXCTL ucode error\n");
- if (ustat & 0x00080000)
- nv_error(priv, "CTXCTL watchdog timeout\n");
- if (ustat & ~0x00080001)
- nv_error(priv, "CTXCTL 0x%08x\n", ustat);
+ if (stat & 0x00000001) {
+ u32 code = nv_rd32(priv, 0x409814);
+ if (code == E_BAD_FWMTHD) {
+ u32 class = nv_rd32(priv, 0x409808);
+ u32 addr = nv_rd32(priv, 0x40980c);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 data = nv_rd32(priv, 0x409810);
+
+ nv_error(priv, "FECS MTHD subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ subc, class, mthd, data);
+
+ nv_wr32(priv, 0x409c20, 0x00000001);
+ stat &= ~0x00000001;
+ } else {
+ nv_error(priv, "FECS ucode error %d\n", code);
+ }
+ }
- nvc0_graph_ctxctl_debug(priv);
- nv_wr32(priv, 0x409c20, ustat);
+ if (stat & 0x00080000) {
+ nv_error(priv, "FECS watchdog timeout\n");
+ nvc0_graph_ctxctl_debug(priv);
+ nv_wr32(priv, 0x409c20, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ nv_error(priv, "FECS 0x%08x\n", stat);
+ nvc0_graph_ctxctl_debug(priv);
+ nv_wr32(priv, 0x409c20, stat);
+ }
}
static void
@@ -894,6 +917,10 @@ nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
nv_wr32(priv, fuc_base + 0x0188, i >> 6);
nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
}
+
+ /* code must be padded to 0x40 words */
+ for (; i & 0x3f; i++)
+ nv_wr32(priv, fuc_base + 0x0184, 0);
}
static void
@@ -1259,10 +1286,14 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_graph_oclass *oclass = (void *)bclass;
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
+ bool use_ext_fw, enable;
int ret, i;
- ret = nouveau_graph_create(parent, engine, bclass,
- (oclass->fecs.ucode != NULL), &priv);
+ use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW",
+ oclass->fecs.ucode == NULL);
+ enable = use_ext_fw || oclass->fecs.ucode != NULL;
+
+ ret = nouveau_graph_create(parent, engine, bclass, enable, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -1272,7 +1303,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.units = nvc0_graph_units;
- if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
+ if (use_ext_fw) {
nv_info(priv, "using external firmware\n");
if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 90d44616c876..ffc289198dd8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -38,6 +38,8 @@
#include <engine/fifo.h>
#include <engine/graph.h>
+#include "fuc/os.h"
+
#define GPC_MAX 32
#define TPC_MAX (GPC_MAX * 8)
@@ -116,6 +118,7 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nvc0_graph_dtor(struct nouveau_object *);
int nvc0_graph_init(struct nouveau_object *);
+int nve4_graph_fini(struct nouveau_object *, bool);
int nve4_graph_init(struct nouveau_object *);
extern struct nouveau_oclass nvc0_graph_sclass[];
@@ -217,6 +220,7 @@ extern const struct nvc0_graph_init nve4_graph_init_main_0[];
extern const struct nvc0_graph_init nve4_graph_init_tpccs_0[];
extern const struct nvc0_graph_init nve4_graph_init_pe_0[];
extern const struct nvc0_graph_init nve4_graph_init_be_0[];
+extern const struct nvc0_graph_pack nve4_graph_pack_mmio[];
extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index f7c011217175..51e0c075ad34 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -151,7 +151,7 @@ nve4_graph_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nve4_graph_pack_mmio[] = {
{ nve4_graph_init_main_0 },
{ nvc0_graph_init_fe_0 },
@@ -189,7 +189,7 @@ nve4_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
+int
nve4_graph_fini(struct nouveau_object *object, bool suspend)
{
struct nvc0_graph_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index 5ce686ee729e..f3b4d9dbf23c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -124,7 +124,7 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
-nv50_software_vblsem_release(void *data, int head)
+nv50_software_vblsem_release(void *data, u32 type, int head)
{
struct nv50_software_chan *chan = data;
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
@@ -183,7 +183,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
return -ENOMEM;
for (i = 0; i < chan->vblank.nr_event; i++) {
- ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
+ ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank,
chan, &chan->vblank.event[i]);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
index 2de370c21279..bb49a7a20857 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -19,7 +19,7 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
struct nv50_software_cclass {
struct nouveau_oclass base;
- int (*vblank)(void *, int);
+ int (*vblank)(void *, u32, int);
};
struct nv50_software_chan {
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index f9430c1bf3e5..135c20f38356 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -104,7 +104,7 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
-nvc0_software_vblsem_release(void *data, int head)
+nvc0_software_vblsem_release(void *data, u32 type, int head)
{
struct nv50_software_chan *chan = data;
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 9c0cd73462d9..e0c812bc884f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -295,6 +295,10 @@ struct nv04_display_scanoutpos {
#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
+#define NV94_DISP_SOR_DP_PWR 0x00016000
+#define NV94_DISP_SOR_DP_PWR_STATE 0x00000001
+#define NV94_DISP_SOR_DP_PWR_STATE_OFF 0x00000000
+#define NV94_DISP_SOR_DP_PWR_STATE_ON 0x00000001
#define NV50_DISP_DAC_MTHD 0x00020000
#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 5d539ebff3ed..ba3f1a76a815 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -12,32 +12,33 @@ struct nouveau_eventh {
struct nouveau_event *event;
struct list_head head;
unsigned long flags;
+ u32 types;
int index;
- int (*func)(void *, int);
+ int (*func)(void *, u32, int);
void *priv;
};
struct nouveau_event {
- spinlock_t list_lock;
- spinlock_t refs_lock;
-
void *priv;
- void (*enable)(struct nouveau_event *, int index);
- void (*disable)(struct nouveau_event *, int index);
+ int (*check)(struct nouveau_event *, u32 type, int index);
+ void (*enable)(struct nouveau_event *, int type, int index);
+ void (*disable)(struct nouveau_event *, int type, int index);
+ int types_nr;
int index_nr;
- struct {
- struct list_head list;
- int refs;
- } index[];
+
+ spinlock_t list_lock;
+ struct list_head *list;
+ spinlock_t refs_lock;
+ int refs[];
};
-int nouveau_event_create(int index_nr, struct nouveau_event **);
+int nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **);
void nouveau_event_destroy(struct nouveau_event **);
-void nouveau_event_trigger(struct nouveau_event *, int index);
+void nouveau_event_trigger(struct nouveau_event *, u32 types, int index);
-int nouveau_event_new(struct nouveau_event *, int index,
- int (*func)(void *, int), void *,
+int nouveau_event_new(struct nouveau_event *, u32 types, int index,
+ int (*func)(void *, u32, int), void *,
struct nouveau_eventh **);
void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
void nouveau_event_get(struct nouveau_eventh *);
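The event rework above threads an event-type bitmask through creation, triggering, and the handler callback. A hedged usage sketch matching the widened signatures (handler body and names illustrative; the subscription mirrors the nv50 software change earlier in this diff):

	static int my_vblank(void *data, u32 type, int head)
	{
		/* 'type' carries the event-type bit(s) that fired for this index */
		return 0; /* disposition; real handlers return the NVKM_EVENT_* codes */
	}

	/* subscribe to type bit 1 on the given head */
	ret = nouveau_event_new(pdisp->vblank, 1, head, my_vblank, chan, &handler);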
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index fd0c68804de3..fde842896806 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -6,8 +6,19 @@
#include <core/device.h>
#include <core/event.h>
+enum nvkm_hpd_event {
+ NVKM_HPD_PLUG = 1,
+ NVKM_HPD_UNPLUG = 2,
+ NVKM_HPD_IRQ = 4,
+ NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ)
+};
+
struct nouveau_disp {
struct nouveau_engine base;
+
+ struct list_head outp;
+ struct nouveau_event *hpd;
+
struct nouveau_event *vblank;
};
@@ -17,25 +28,6 @@ nouveau_disp(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
}
-#define nouveau_disp_create(p,e,c,h,i,x,d) \
- nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
- sizeof(**d), (void **)d)
-#define nouveau_disp_destroy(d) ({ \
- struct nouveau_disp *disp = (d); \
- _nouveau_disp_dtor(nv_object(disp)); \
-})
-#define nouveau_disp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_disp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int heads,
- const char *, const char *, int, void **);
-void _nouveau_disp_dtor(struct nouveau_object *);
-#define _nouveau_disp_init _nouveau_engine_init
-#define _nouveau_disp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass *nv04_disp_oclass;
extern struct nouveau_oclass *nv50_disp_oclass;
extern struct nouveau_oclass *nv84_disp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 26b6b2bb1112..b639eb2c74ff 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -109,6 +109,7 @@ extern struct nouveau_oclass *nv50_fifo_oclass;
extern struct nouveau_oclass *nv84_fifo_oclass;
extern struct nouveau_oclass *nvc0_fifo_oclass;
extern struct nouveau_oclass *nve0_fifo_oclass;
+extern struct nouveau_oclass *gk20a_fifo_oclass;
extern struct nouveau_oclass *nv108_fifo_oclass;
void nv04_fifo_intr(struct nouveau_subdev *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 871edfdf3d5b..8c1d4772da0c 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -68,6 +68,7 @@ extern struct nouveau_oclass *nvc8_graph_oclass;
extern struct nouveau_oclass *nvd7_graph_oclass;
extern struct nouveau_oclass *nvd9_graph_oclass;
extern struct nouveau_oclass *nve4_graph_oclass;
+extern struct nouveau_oclass *gk20a_graph_oclass;
extern struct nouveau_oclass *nvf0_graph_oclass;
extern struct nouveau_oclass *nv108_graph_oclass;
extern struct nouveau_oclass *gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
index a32feb3f3fb6..f3930c27cb7a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -22,7 +22,25 @@ enum dcb_connector_type {
DCB_CONNECTOR_NONE = 0xff
};
-u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len);
+struct nvbios_connT {
+};
+
+u32 nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_connT *info);
+
+struct nvbios_connE {
+ u8 type;
+ u8 location;
+ u8 hpd;
+ u8 dp;
+ u8 di;
+ u8 sr;
+ u8 lcdid;
+};
+
+u32 nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *hdr);
+u32 nvbios_connEp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *hdr,
+ struct nvbios_connE *info);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 6e54218b55fc..728206e21777 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -17,9 +17,10 @@ u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
struct nvbios_dpout *);
struct nvbios_dpcfg {
- u8 drv;
- u8 pre;
- u8 unk;
+ u8 pc;
+ u8 dc;
+ u8 pe;
+ u8 tx_pu;
};
u16
@@ -27,7 +28,7 @@ nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpcfg *);
u16
-nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 pc, u8 vs, u8 pe,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpcfg *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 8f4ced75444a..c01e29c9f89a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -77,6 +77,8 @@ struct nouveau_clock {
int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
+ bool allow_reclock;
+
int (*read)(struct nouveau_clock *, enum nv_clk_src);
int (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
int (*prog)(struct nouveau_clock *);
@@ -106,8 +108,8 @@ struct nouveau_clocks {
int mdiv;
};
-#define nouveau_clock_create(p,e,o,i,d) \
- nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
+#define nouveau_clock_create(p,e,o,i,r,d) \
+ nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d)
#define nouveau_clock_destroy(p) ({ \
struct nouveau_clock *clk = (p); \
_nouveau_clock_dtor(nv_object(clk)); \
@@ -121,7 +123,7 @@ struct nouveau_clocks {
int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *,
- struct nouveau_clocks *, int, void **);
+ struct nouveau_clocks *, bool, int, void **);
void _nouveau_clock_dtor(struct nouveau_object *);
int _nouveau_clock_init(struct nouveau_object *);
#define _nouveau_clock_fini _nouveau_subdev_fini
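nouveau_clock_create() gains an allow-reclock flag, so each chipset constructor now states explicitly whether reclocking may be attempted. A hedged caller sketch (the surrounding constructor and the domain-table name are assumptions, not from this patch):

	/* 'false' keeps reclocking disabled for this chipset */
	ret = nouveau_clock_create(parent, engine, oclass, nv84_domains,
				   false, &priv);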
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 58c7ccdebb01..871e73914b24 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -105,6 +105,7 @@ extern struct nouveau_oclass *nvaa_fb_oclass;
extern struct nouveau_oclass *nvaf_fb_oclass;
extern struct nouveau_oclass *nvc0_fb_oclass;
extern struct nouveau_oclass *nve0_fb_oclass;
+extern struct nouveau_oclass *gk20a_fb_oclass;
extern struct nouveau_oclass *gm107_fb_oclass;
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index c85b9f1579ad..612d82ab683d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -8,17 +8,18 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
+enum nvkm_gpio_event {
+ NVKM_GPIO_HI = 1,
+ NVKM_GPIO_LO = 2,
+ NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO),
+};
+
struct nouveau_gpio {
struct nouveau_subdev base;
struct nouveau_event *events;
- /* hardware interfaces */
void (*reset)(struct nouveau_gpio *, u8 func);
- int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
- int (*sense)(struct nouveau_gpio *, int line);
-
- /* software interfaces */
int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
struct dcb_gpio_func *);
int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
@@ -31,23 +32,10 @@ nouveau_gpio(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
}
-#define nouveau_gpio_create(p,e,o,l,d) \
- nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
-#define nouveau_gpio_destroy(p) ({ \
- struct nouveau_gpio *gpio = (p); \
- _nouveau_gpio_dtor(nv_object(gpio)); \
-})
-#define nouveau_gpio_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, int, void **);
-void _nouveau_gpio_dtor(struct nouveau_object *);
-int nouveau_gpio_init(struct nouveau_gpio *);
-
-extern struct nouveau_oclass nv10_gpio_oclass;
-extern struct nouveau_oclass nv50_gpio_oclass;
-extern struct nouveau_oclass nvd0_gpio_oclass;
-extern struct nouveau_oclass nve0_gpio_oclass;
+extern struct nouveau_oclass *nv10_gpio_oclass;
+extern struct nouveau_oclass *nv50_gpio_oclass;
+extern struct nouveau_oclass *nv92_gpio_oclass;
+extern struct nouveau_oclass *nvd0_gpio_oclass;
+extern struct nouveau_oclass *nve0_gpio_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7f50a858b16f..825f7bb46b67 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -14,52 +14,41 @@
#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
+enum nvkm_i2c_event {
+ NVKM_I2C_PLUG = 1,
+ NVKM_I2C_UNPLUG = 2,
+ NVKM_I2C_IRQ = 4,
+ NVKM_I2C_DONE = 8,
+ NVKM_I2C_ANY = (NVKM_I2C_PLUG |
+ NVKM_I2C_UNPLUG |
+ NVKM_I2C_IRQ |
+ NVKM_I2C_DONE),
+};
+
struct nouveau_i2c_port {
struct nouveau_object base;
struct i2c_adapter adapter;
+ struct mutex mutex;
struct list_head head;
u8 index;
+ int aux;
const struct nouveau_i2c_func *func;
};
struct nouveau_i2c_func {
- void (*acquire)(struct nouveau_i2c_port *);
- void (*release)(struct nouveau_i2c_port *);
-
void (*drive_scl)(struct nouveau_i2c_port *, int);
void (*drive_sda)(struct nouveau_i2c_port *, int);
int (*sense_scl)(struct nouveau_i2c_port *);
int (*sense_sda)(struct nouveau_i2c_port *);
- int (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
+ int (*aux)(struct nouveau_i2c_port *, bool, u8, u32, u8 *, u8);
int (*pattern)(struct nouveau_i2c_port *, int pattern);
int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
};
-#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
- nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
- sizeof(**d), (void **)d)
-#define nouveau_i2c_port_destroy(p) ({ \
- struct nouveau_i2c_port *port = (p); \
- _nouveau_i2c_port_dtor(nv_object(i2c)); \
-})
-#define nouveau_i2c_port_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_i2c_port_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u8,
- const struct i2c_algorithm *,
- const struct nouveau_i2c_func *,
- int, void **);
-void _nouveau_i2c_port_dtor(struct nouveau_object *);
-#define _nouveau_i2c_port_init nouveau_object_init
-#define _nouveau_i2c_port_fini nouveau_object_fini
-
struct nouveau_i2c_board_info {
struct i2c_board_info dev;
u8 udelay; /* set to 0 to use the standard delay */
@@ -67,13 +56,20 @@ struct nouveau_i2c_board_info {
struct nouveau_i2c {
struct nouveau_subdev base;
+ struct nouveau_event *ntfy;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
+ int (*acquire_pad)(struct nouveau_i2c_port *, unsigned long timeout);
+ void (*release_pad)(struct nouveau_i2c_port *);
+ int (*acquire)(struct nouveau_i2c_port *, unsigned long timeout);
+ void (*release)(struct nouveau_i2c_port *);
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct nouveau_i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
struct i2c_board_info *, void *), void *);
+
+ wait_queue_head_t wait;
struct list_head ports;
};
@@ -83,37 +79,13 @@ nouveau_i2c(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
}
-#define nouveau_i2c_create(p,e,o,s,d) \
- nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
-#define nouveau_i2c_destroy(p) ({ \
- struct nouveau_i2c *i2c = (p); \
- _nouveau_i2c_dtor(nv_object(i2c)); \
-})
-#define nouveau_i2c_init(p) ({ \
- struct nouveau_i2c *i2c = (p); \
- _nouveau_i2c_init(nv_object(i2c)); \
-})
-#define nouveau_i2c_fini(p,s) ({ \
- struct nouveau_i2c *i2c = (p); \
- _nouveau_i2c_fini(nv_object(i2c), (s)); \
-})
-
-int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, struct nouveau_oclass *,
- int, void **);
-void _nouveau_i2c_dtor(struct nouveau_object *);
-int _nouveau_i2c_init(struct nouveau_object *);
-int _nouveau_i2c_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_i2c_oclass;
-extern struct nouveau_oclass nv4e_i2c_oclass;
-extern struct nouveau_oclass nv50_i2c_oclass;
-extern struct nouveau_oclass nv94_i2c_oclass;
-extern struct nouveau_oclass nvd0_i2c_oclass;
-extern struct nouveau_oclass nouveau_anx9805_sclass[];
-
-extern const struct i2c_algorithm nouveau_i2c_bit_algo;
-extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+extern struct nouveau_oclass *nv04_i2c_oclass;
+extern struct nouveau_oclass *nv4e_i2c_oclass;
+extern struct nouveau_oclass *nv50_i2c_oclass;
+extern struct nouveau_oclass *nv94_i2c_oclass;
+extern struct nouveau_oclass *nvd0_i2c_oclass;
+extern struct nouveau_oclass *gf117_i2c_oclass;
+extern struct nouveau_oclass *nve0_i2c_oclass;
static inline int
nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
index 88814f159d89..31df634c0fdc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
@@ -30,5 +30,6 @@ nouveau_ibus(void *obj)
extern struct nouveau_oclass nvc0_ibus_oclass;
extern struct nouveau_oclass nve0_ibus_oclass;
+extern struct nouveau_oclass gk20a_ibus_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index bdf594116f3f..73b1ed20c8d5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -118,8 +118,10 @@ nouveau_bar_create_(struct nouveau_object *parent,
if (ret)
return ret;
- bar->iomem = ioremap(nv_device_resource_start(device, 3),
- nv_device_resource_len(device, 3));
+ if (nv_device_resource_len(device, 3) != 0)
+ bar->iomem = ioremap(nv_device_resource_start(device, 3),
+ nv_device_resource_len(device, 3));
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 3f30db62e656..ca8139b9ab27 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -30,14 +30,16 @@
#include "priv.h"
+struct nvc0_bar_priv_vm {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
struct nvc0_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
- struct {
- struct nouveau_gpuobj *mem;
- struct nouveau_gpuobj *pgd;
- struct nouveau_vm *vm;
- } bar[2];
+ struct nvc0_bar_priv_vm bar[2];
};
static int
@@ -79,87 +81,87 @@ nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
}
static int
-nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm,
+ int bar_nr)
{
- struct nouveau_device *device = nv_device(parent);
- struct nvc0_bar_priv *priv;
- struct nouveau_gpuobj *mem;
+ struct nouveau_device *device = nv_device(&priv->base);
struct nouveau_vm *vm;
+ resource_size_t bar_len;
int ret;
- ret = nouveau_bar_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- /* BAR3 */
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
- &priv->bar[0].mem);
- mem = priv->bar[0].mem;
+ &bar_vm->mem);
if (ret)
return ret;
ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
- &priv->bar[0].pgd);
+ &bar_vm->pgd);
if (ret)
return ret;
- ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 3), 0, &vm);
+ bar_len = nv_device_resource_len(device, bar_nr);
+
+ ret = nouveau_vm_new(device, 0, bar_len, 0, &vm);
if (ret)
return ret;
atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
- ret = nouveau_gpuobj_new(nv_object(priv), NULL,
- (nv_device_resource_len(device, 3) >> 12) * 8,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
- &vm->pgt[0].obj[0]);
- vm->pgt[0].refcount[0] = 1;
- if (ret)
- return ret;
+ /*
+ * Bootstrap page table lookup.
+ */
+ if (bar_nr == 3) {
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+ (bar_len >> 12) * 8, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &vm->pgt[0].obj[0]);
+ vm->pgt[0].refcount[0] = 1;
+ if (ret)
+ return ret;
+ }
- ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
+ ret = nouveau_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
nouveau_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
- nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
- nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
- nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 3) - 1));
- nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 3) - 1));
+ nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+ nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+ nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+ nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
- /* BAR1 */
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
- &priv->bar[1].mem);
- mem = priv->bar[1].mem;
- if (ret)
- return ret;
+ return 0;
+}
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
- &priv->bar[1].pgd);
- if (ret)
- return ret;
+static int
+nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nvc0_bar_priv *priv;
+ bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+ int ret;
- ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 1), 0, &vm);
+ ret = nouveau_bar_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+ /* BAR3 */
+ if (has_bar3) {
+ ret = nvc0_bar_init_vm(priv, &priv->bar[0], 3);
+ if (ret)
+ return ret;
+ priv->base.alloc = nouveau_bar_alloc;
+ priv->base.kmap = nvc0_bar_kmap;
+ }
- ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
- nouveau_vm_ref(NULL, &vm, NULL);
+ /* BAR1 */
+ ret = nvc0_bar_init_vm(priv, &priv->bar[1], 1);
if (ret)
return ret;
- nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
- nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
- nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 1) - 1));
- nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 1) - 1));
-
- priv->base.alloc = nouveau_bar_alloc;
- priv->base.kmap = nvc0_bar_kmap;
priv->base.umap = nvc0_bar_umap;
priv->base.unmap = nvc0_bar_unmap;
priv->base.flush = nv84_bar_flush;
@@ -201,7 +203,9 @@ nvc0_bar_init(struct nouveau_object *object)
nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
- nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
+ if (priv->bar[0].mem)
+ nv_wr32(priv, 0x001714,
+ 0xc0000000 | priv->bar[0].mem->addr >> 12);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 222e8ebb669d..d45704a2c2df 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -183,10 +183,11 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
goto out;
bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data) {
- for (i = 0; i < bios->size; i += 4)
- ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
- }
+ if (!bios->data)
+ goto out;
+
+ for (i = 0; i < bios->size; i += 4)
+ ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
/* check the PCI record header */
pcir = nv_ro16(bios, 0x0018);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
index 5ac010efd959..2ede3bcd96a1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
@@ -28,12 +28,12 @@
#include <subdev/bios/dcb.h>
#include <subdev/bios/conn.h>
-u16
-dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_connTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+ u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
- u16 data = nv_ro16(bios, dcb + 0x14);
+ u32 data = nv_ro16(bios, dcb + 0x14);
if (data) {
*ver = nv_ro08(bios, data + 0);
*hdr = nv_ro08(bios, data + 1);
@@ -42,15 +42,59 @@ dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
return data;
}
}
- return 0x0000;
+ return 0x00000000;
}
-u16
-dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+u32
+nvbios_connTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_connT *info)
+{
+ u32 data = nvbios_connTe(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x30:
+ case 0x40:
+ return data;
+ default:
+ break;
+ }
+ return 0x00000000;
+}
+
+u32
+nvbios_connEe(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len);
+ u32 data = nvbios_connTe(bios, ver, &hdr, &cnt, len);
if (data && idx < cnt)
return data + hdr + (idx * *len);
- return 0x0000;
+ return 0x00000000;
+}
+
+u32
+nvbios_connEp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct nvbios_connE *info)
+{
+ u32 data = nvbios_connEe(bios, idx, ver, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x30:
+ case 0x40:
+ info->type = nv_ro08(bios, data + 0x00);
+ info->location = nv_ro08(bios, data + 0x01) & 0x0f;
+ info->hpd = (nv_ro08(bios, data + 0x01) & 0x30) >> 4;
+ info->dp = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6;
+ if (*len < 4)
+ return data;
+ info->hpd |= (nv_ro08(bios, data + 0x02) & 0x03) << 2;
+ info->dp |= nv_ro08(bios, data + 0x02) & 0x0c;
+ info->di = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4;
+ info->hpd |= (nv_ro08(bios, data + 0x03) & 0x07) << 4;
+ info->sr = (nv_ro08(bios, data + 0x03) & 0x08) >> 3;
+ info->lcdid = (nv_ro08(bios, data + 0x03) & 0x70) >> 4;
+ return data;
+ default:
+ break;
+ }
+ return 0x00000000;
}
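A note on the parsing idiom above: the new connTp/connEp parsers collapse "table absent" and "unknown table revision" into one switch by computing `!!data * *ver` — `!!data` is 0 or 1, so a missing table forces case 0 and any unsupported revision falls into the default arm. A minimal standalone sketch of the idiom (hypothetical names, not the driver code):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for a BIOS table lookup: returns the table
 * offset (0 if absent) and writes the table version through *ver. */
static uint32_t
find_table(int present, uint8_t version, uint8_t *ver)
{
	*ver = version;
	return present ? 0x1000 : 0;
}

static uint32_t
parse_table(int present, uint8_t version)
{
	uint8_t ver;
	uint32_t data = find_table(present, version, &ver);

	/* !!data is 0 or 1: a missing table selects case 0, an
	 * unknown version falls through to the default arm. */
	switch (!!data * ver) {
	case 0x30:
	case 0x40:
		return data;	/* supported revision: parse it */
	default:
		return 0;	/* absent table or unsupported revision */
	}
}

int
main(void)
{
	printf("present v0x30: 0x%x\n", (unsigned)parse_table(1, 0x30)); /* 0x1000 */
	printf("present v0x20: 0x%x\n", (unsigned)parse_table(1, 0x20)); /* 0 */
	printf("absent  v0x30: 0x%x\n", (unsigned)parse_table(0, 0x30)); /* 0 */
	return 0;
}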
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 7628fe759220..f309dd657250 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -162,18 +162,20 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
struct nvbios_dpcfg *info)
{
u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
if (data) {
switch (*ver) {
case 0x21:
- info->drv = nv_ro08(bios, data + 0x02);
- info->pre = nv_ro08(bios, data + 0x03);
- info->unk = nv_ro08(bios, data + 0x04);
+ info->dc = nv_ro08(bios, data + 0x02);
+ info->pe = nv_ro08(bios, data + 0x03);
+ info->tx_pu = nv_ro08(bios, data + 0x04);
break;
case 0x30:
case 0x40:
- info->drv = nv_ro08(bios, data + 0x01);
- info->pre = nv_ro08(bios, data + 0x02);
- info->unk = nv_ro08(bios, data + 0x03);
+ info->pc = nv_ro08(bios, data + 0x00);
+ info->dc = nv_ro08(bios, data + 0x01);
+ info->pe = nv_ro08(bios, data + 0x02);
+ info->tx_pu = nv_ro08(bios, data + 0x03);
break;
default:
data = 0x0000;
@@ -184,7 +186,7 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_dpcfg *info)
{
@@ -193,16 +195,15 @@ nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
if (*ver >= 0x30) {
const u8 vsoff[] = { 0, 4, 7, 9 };
- idx = (un * 10) + vsoff[vs] + pe;
+ idx = (pc * 10) + vsoff[vs] + pe;
} else {
- while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+ while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
ver, hdr, cnt, len))) {
if (nv_ro08(bios, data + 0x00) == vs &&
nv_ro08(bios, data + 0x01) == pe)
break;
- idx++;
}
}
- return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
+ return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
}
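For the v3.x tables, the match routine above now computes the entry index arithmetically: each post-cursor level spans ten entries, and `vsoff[]` gives the first slot for each voltage-swing level (4 + 3 + 2 + 1 pre-emphasis entries, matching DP's shrinking pre-emphasis range at higher swings). A standalone sketch of that arithmetic, using the same constants but a hypothetical wrapper:

#include <stdio.h>
#include <stdint.h>

/* First slot for each voltage-swing level; within a level, entries
 * are consecutive in pre-emphasis order, so one post-cursor block
 * spans 10 entries in total (4 + 3 + 2 + 1). */
static int
dpcfg_index(uint8_t pc, uint8_t vs, uint8_t pe)
{
	static const uint8_t vsoff[] = { 0, 4, 7, 9 };
	return (pc * 10) + vsoff[vs] + pe;
}

int
main(void)
{
	printf("%d\n", dpcfg_index(0, 0, 0));	/* 0 */
	printf("%d\n", dpcfg_index(1, 2, 1));	/* 18 */
	return 0;
}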
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index acaeaf79e3f0..626380f9e4c0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -98,15 +98,16 @@ static u8
init_conn(struct nvbios_init *init)
{
struct nouveau_bios *bios = init->bios;
- u8 ver, len;
- u16 conn;
+ struct nvbios_connE connE;
+ u8 ver, hdr;
+ u32 conn;
if (init_exec(init)) {
if (init->outp) {
conn = init->outp->connector;
- conn = dcb_conn(bios, conn, &ver, &len);
+ conn = nvbios_connEp(bios, conn, &ver, &hdr, &connE);
if (conn)
- return nv_ro08(bios, conn);
+ return connE.type;
}
error("script needs connector type\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index dd62baead39c..22351f594d2a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -346,8 +346,8 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
struct nouveau_pstate *pstate;
int i = 0;
- /* YKW repellant */
- return -ENOSYS;
+ if (!clk->allow_reclock)
+ return -ENOSYS;
if (req != -1 && req != -2) {
list_for_each_entry(pstate, &clk->states, head) {
@@ -456,6 +456,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
struct nouveau_clocks *clocks,
+ bool allow_reclock,
int length, void **object)
{
struct nouveau_device *device = nv_device(parent);
@@ -478,6 +479,8 @@ nouveau_clock_create_(struct nouveau_object *parent,
ret = nouveau_pstate_new(clk, idx++);
} while (ret == 0);
+ clk->allow_reclock = allow_reclock;
+
mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
if (mode) {
if (!strncasecmpz(mode, "disabled", arglen)) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index b74db6cfc4e2..eb2d4425a49e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -82,7 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index db7346f79080..8a9e16839791 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -213,7 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv40_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index 250a6d96016b..8c132772ba9e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -507,7 +507,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
- &priv);
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 4f5a1373f002..9fb58354a80b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -302,7 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
index 7a723b4f564d..6a65fc9e9663 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -421,7 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvaa_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index c3105720ed24..dbf8517f54da 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -437,7 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index d3c37c96f0e7..0e62a3240144 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -307,7 +307,6 @@ calc_clk(struct nve0_clock_priv *priv,
info->dsrc = src0;
if (div0) {
info->ddiv |= 0x80000000;
- info->ddiv |= div0 << 8;
info->ddiv |= div0;
}
if (div1D) {
@@ -352,7 +351,7 @@ nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
{
struct nve0_clock_info *info = &priv->eng[clk];
if (!info->ssel) {
- nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
}
}
@@ -389,7 +388,10 @@ static void
nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
{
struct nve0_clock_info *info = &priv->eng[clk];
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+ if (info->ssel)
+ nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+ else
+ nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
}
static void
@@ -473,7 +475,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true,
+ &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
new file mode 100644
index 000000000000..a16024a74771
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvc0.h"
+
+struct gk20a_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gk20a_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_oclass *
+gk20a_fb_oclass = &(struct nouveau_fb_impl) {
+ .base.handle = NV_SUBDEV(FB, 0xea),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+ .memtype = nvc0_fb_memtype_valid,
+ .ram = &gk20a_ram_oclass,
+}.base;
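The `&(struct nouveau_fb_impl){ ... }.base` construction above is used throughout this series: the class definition is a file-scope C99 compound literal of the derived impl structure, and the exported pointer is the address of its embedded base member. A minimal self-contained sketch of the pattern, with hypothetical structures in place of the nouveau ones:

#include <stdio.h>

/* Base "class" plus a derived implementation that embeds it. */
struct base_class {
	const char *name;
};

struct impl_class {
	struct base_class base;
	int lines;
};

/* File-scope compound literals have static storage, so taking the
 * address of their .base member is a valid address constant.
 * Callers see a struct base_class *; code that knows the derived
 * layout casts back to struct impl_class *. */
struct base_class *
example_oclass = &(struct impl_class) {
	.base.name = "example",
	.lines = 16,
}.base;

int
main(void)
{
	struct impl_class *impl = (struct impl_class *)example_oclass;
	printf("%s: %d lines\n", example_oclass->name, impl->lines);
	return 0;
}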
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index da74c889aed4..82273f832e42 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -32,6 +32,7 @@ extern struct nouveau_oclass nva3_ram_oclass;
extern struct nouveau_oclass nvaa_ram_oclass;
extern struct nouveau_oclass nvc0_ram_oclass;
extern struct nouveau_oclass nve0_ram_oclass;
+extern struct nouveau_oclass gk20a_ram_oclass;
extern struct nouveau_oclass gm107_ram_oclass;
int nouveau_sddr3_calc(struct nouveau_ram *ram);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 0f57fcfe0bbf..2af9cfd2c60f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
};
}
-static inline struct ramfuc_reg
+static noinline struct ramfuc_reg
ramfuc_reg(u32 addr)
{
return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
-#define ram_have(s,r) ((s)->r_##r.addr != 0x000000)
+#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
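The ram_have() change above (`.addr` becomes `.addr[0]`, since ramfuc_reg2() now stores two addresses) also shows the token-pasting trick these macros rely on: `r_##r` splices the register name into a member access, so one macro serves every named register field. A tiny standalone demonstration, with hypothetical member names:

#include <stdio.h>

/* ram_have(s, mr0) expands to (s)->r_mr0.addr[0] != 0x000000:
 * the ## operator pastes the register name into the member access. */
struct reg { unsigned addr[2]; };
struct ram { struct reg r_mr0, r_mr1; };

#define ram_have(s, r) ((s)->r_##r.addr[0] != 0x000000)

int
main(void)
{
	struct ram ram = { .r_mr0 = { { 0x10f300, 0x10f300 } } };
	printf("mr0: %d, mr1: %d\n", ram_have(&ram, mr0), ram_have(&ram, mr1));
	return 0;
}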
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c
new file mode 100644
index 000000000000..4d77d75e4673
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramgk20a.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <subdev/fb.h>
+
+struct gk20a_mem {
+ struct nouveau_mem base;
+ void *cpuaddr;
+ dma_addr_t handle;
+};
+#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
+
+static void
+gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+ struct device *dev = nv_device_base(nv_device(pfb));
+ struct gk20a_mem *mem = to_gk20a_mem(*pmem);
+
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
+
+ if (likely(mem->cpuaddr))
+ dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
+ mem->cpuaddr, mem->handle);
+
+ kfree(mem->base.pages);
+ kfree(mem);
+}
+
+static int
+gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nouveau_mem **pmem)
+{
+ struct device *dev = nv_device_base(nv_device(pfb));
+ struct gk20a_mem *mem;
+ u32 type = memtype & 0xff;
+ u32 npages, order;
+ int i;
+
+ nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
+ align, ncmin);
+
+ npages = size >> PAGE_SHIFT;
+ if (npages == 0)
+ npages = 1;
+
+ if (align == 0)
+ align = PAGE_SIZE;
+ align >>= PAGE_SHIFT;
+
+ /* round alignment to the next power of 2, if needed */
+ order = fls(align);
+ if ((align & (align - 1)) == 0)
+ order--;
+ align = BIT(order);
+
+ /* ensure returned address is correctly aligned */
+ npages = max(align, npages);
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ mem->base.size = npages;
+ mem->base.memtype = type;
+
+ mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
+ if (!mem->base.pages) {
+ kfree(mem);
+ return -ENOMEM;
+ }
+
+ *pmem = &mem->base;
+
+ mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
+ &mem->handle, GFP_KERNEL);
+ if (!mem->cpuaddr) {
+ nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
+ gk20a_ram_put(pfb, pmem);
+ return -ENOMEM;
+ }
+
+ align <<= PAGE_SHIFT;
+
+ /* alignment check */
+ if (unlikely(mem->handle & (align - 1)))
+ nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
+ &mem->handle, align);
+
+ nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
+ npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);
+
+ for (i = 0; i < npages; i++)
+ mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);
+
+ mem->base.offset = (u64)mem->base.pages[0];
+
+ return 0;
+}
+
+static int
+gk20a_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 datasize,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_ram *ram;
+ int ret;
+
+ ret = nouveau_ram_create(parent, engine, oclass, &ram);
+ *pobject = nv_object(ram);
+ if (ret)
+ return ret;
+ ram->type = NV_MEM_TYPE_STOLEN;
+ ram->size = get_num_physpages() << PAGE_SHIFT;
+
+ ram->get = gk20a_ram_get;
+ ram->put = gk20a_ram_put;
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_ram_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_ram_ctor,
+ .dtor = _nouveau_ram_dtor,
+ .init = _nouveau_ram_init,
+ .fini = _nouveau_ram_fini,
+ },
+};
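gk20a_ram_get() above rounds the requested alignment (in pages) up to a power of two before sizing the DMA allocation. A userspace re-creation of that rounding — assuming a GCC-style __builtin_clz to stand in for the kernel's fls(), with BIT() spelled out — behaves like this:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * most significant set bit, 0 for a zero argument. */
static int
fls_u32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

#define BIT(n) (1u << (n))

/* Round a nonzero alignment up to the next power of two, mirroring
 * the order computation in gk20a_ram_get(). */
static uint32_t
round_align(uint32_t align)
{
	int order = fls_u32(align);

	if ((align & (align - 1)) == 0)	/* already a power of two */
		order--;
	return BIT(order);
}

int
main(void)
{
	printf("%u -> %u\n", 1u, round_align(1));	/* 1 */
	printf("%u -> %u\n", 3u, round_align(3));	/* 4 */
	printf("%u -> %u\n", 4u, round_align(4));	/* 4 */
	printf("%u -> %u\n", 5u, round_align(5));	/* 8 */
	return 0;
}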
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index ef91b6e893af..e5d12c24cc43 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -211,7 +211,7 @@ nv50_ram_prog(struct nouveau_fb *pfb)
struct nv50_ram *ram = (void *)pfb->ram;
struct nv50_ramseq *hwsq = &ram->hwsq;
- ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index 6eb97f16fbda..8076fb195dd5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -309,7 +309,7 @@ nva3_ram_prog(struct nouveau_fb *pfb)
struct nouveau_device *device = nv_device(pfb);
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 8edc92224c84..5a6a5027f749 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -408,7 +408,7 @@ nvc0_ram_prog(struct nouveau_fb *pfb)
struct nouveau_device *device = nv_device(pfb);
struct nvc0_ram *ram = (void *)pfb->ram;
struct nvc0_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 16752192cf87..c5b46e302319 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
/* (re)program mempll, if required */
if (ram->mode == 2) {
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+ ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
@@ -262,8 +263,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
struct nouveau_ram_data *next = ram->base.next;
- int vc = !(next->bios.ramcfg_11_02_08);
- int mv = !(next->bios.ramcfg_11_02_04);
+ int vc = !next->bios.ramcfg_11_02_08;
+ int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -370,8 +371,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (next->bios.ramcfg_11_02_40) ||
- (next->bios.ramcfg_11_07_10)) {
+ if (next->bios.ramcfg_11_02_40 ||
+ next->bios.ramcfg_11_07_10) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
@@ -417,7 +418,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f694, 0xff00ff00, data);
}
- if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
+ if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
data = 0x00000080;
else
data = 0x00000000;
@@ -425,13 +426,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
mask = 0x00070000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_02_80))
+ if (!next->bios.ramcfg_11_02_80)
data |= 0x03000000;
- if (!(next->bios.ramcfg_11_02_40))
+ if (!next->bios.ramcfg_11_02_40)
data |= 0x00002000;
- if (!(next->bios.ramcfg_11_07_10))
+ if (!next->bios.ramcfg_11_07_10)
data |= 0x00004000;
- if (!(next->bios.ramcfg_11_07_08))
+ if (!next->bios.ramcfg_11_07_08)
data |= 0x00000003;
else
data |= 0x74000000;
@@ -486,7 +487,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = mask = 0x00000000;
if (NOTE00(ramcfg_02_03 != 0)) {
- data |= (next->bios.ramcfg_11_02_03) << 8;
+ data |= next->bios.ramcfg_11_02_03 << 8;
mask |= 0x00000300;
}
if (NOTE00(ramcfg_01_10)) {
@@ -498,7 +499,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = mask = 0x00000000;
if (NOTE00(timing_30_07 != 0)) {
- data |= (next->bios.timing_20_30_07) << 28;
+ data |= next->bios.timing_20_30_07 << 28;
mask |= 0x70000000;
}
if (NOTE00(ramcfg_01_01)) {
@@ -510,7 +511,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = mask = 0x00000000;
if (NOTE00(timing_30_07 != 0)) {
- data |= (next->bios.timing_20_30_07) << 28;
+ data |= next->bios.timing_20_30_07 << 28;
mask |= 0x70000000;
}
if (NOTE00(ramcfg_01_02)) {
@@ -522,16 +523,16 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
mask = 0x33f00000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_01_04))
+ if (!next->bios.ramcfg_11_01_04)
data |= 0x20200000;
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.ramcfg_11_03_f0) {
if (next->bios.rammap_11_08_0c) {
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -563,7 +564,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
}
- data = (next->bios.timing_20_30_07) << 8;
+ data = next->bios.timing_20_30_07 << 8;
if (next->bios.ramcfg_11_01_01)
data |= 0x80000000;
ram_mask(fuc, 0x100778, 0x00000700, data);
@@ -588,7 +589,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
- if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
+ if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
ram_nsec(fuc, 1000);
@@ -621,8 +622,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = ram_rd32(fuc, 0x10f978);
data &= ~0x00046144;
data |= 0x0000000b;
- if (!(next->bios.ramcfg_11_07_08)) {
- if (!(next->bios.ramcfg_11_07_04))
+ if (!next->bios.ramcfg_11_07_08) {
+ if (!next->bios.ramcfg_11_07_04)
data |= 0x0000200c;
else
data |= 0x00000000;
@@ -636,11 +637,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f830, data);
}
- if (!(next->bios.ramcfg_11_07_08)) {
+ if (!next->bios.ramcfg_11_07_08) {
data = 0x88020000;
- if ( (next->bios.ramcfg_11_07_04))
+ if ( next->bios.ramcfg_11_07_04)
data |= 0x10000000;
- if (!(next->bios.rammap_11_08_10))
+ if (!next->bios.rammap_11_08_10)
data |= 0x00080000;
} else {
data = 0xa40e0000;
@@ -689,8 +690,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
struct nouveau_ram_data *next = ram->base.next;
- int vc = !(next->bios.ramcfg_11_02_08);
- int mv = !(next->bios.ramcfg_11_02_04);
+ int vc = !next->bios.ramcfg_11_02_08;
+ int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -705,7 +706,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- if ((next->bios.ramcfg_11_03_f0))
+ if (next->bios.ramcfg_11_03_f0)
ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -761,7 +762,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
- data |= (next->bios.ramcfg_11_03_30) << 12;
+ data |= next->bios.ramcfg_11_03_30 << 16;
ram_wr32(fuc, 0x1373ec, data);
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -793,8 +794,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (next->bios.ramcfg_11_02_40) ||
- (next->bios.ramcfg_11_07_10)) {
+ if (next->bios.ramcfg_11_02_40 ||
+ next->bios.ramcfg_11_07_10) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
@@ -810,13 +811,13 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
mask = 0x00010000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_02_80))
+ if (!next->bios.ramcfg_11_02_80)
data |= 0x03000000;
- if (!(next->bios.ramcfg_11_02_40))
+ if (!next->bios.ramcfg_11_02_40)
data |= 0x00002000;
- if (!(next->bios.ramcfg_11_07_10))
+ if (!next->bios.ramcfg_11_07_10)
data |= 0x00004000;
- if (!(next->bios.ramcfg_11_07_08))
+ if (!next->bios.ramcfg_11_07_08)
data |= 0x00000003;
else
data |= 0x14000000;
@@ -844,16 +845,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
mask = 0x33f00000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_01_04))
+ if (!next->bios.ramcfg_11_01_04)
data |= 0x20200000;
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.ramcfg_11_03_f0) {
if (next->bios.rammap_11_08_0c) {
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -876,7 +877,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
data = next->bios.timing_20_2c_1fc0;
ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
- ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
+ ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
ram_wr32(fuc, 0x10f090, 0x4000007f);
ram_nsec(fuc, 1000);
@@ -1111,7 +1112,7 @@ nve0_ram_prog(struct nouveau_fb *pfb)
struct nouveau_device *device = nv_device(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+ ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
return (ram->base.next == &ram->base.xition);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index f572c2804c32..45e0202f3151 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -22,21 +22,24 @@
* Authors: Ben Skeggs
*/
-#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
+#include "priv.h"
+
static int
nouveau_gpio_drive(struct nouveau_gpio *gpio,
int idx, int line, int dir, int out)
{
- return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV;
+ const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
}
static int
nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
{
- return gpio->sense ? gpio->sense(gpio, line) : -ENODEV;
+ const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ return impl->sense ? impl->sense(gpio, line) : -ENODEV;
}
static int
@@ -102,6 +105,80 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
return ret;
}
+static void
+nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+ const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ impl->intr_mask(gpio, type, 1 << index, 0);
+}
+
+static void
+nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+ const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ impl->intr_mask(gpio, type, 1 << index, 1 << index);
+}
+
+static void
+nouveau_gpio_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(subdev);
+ const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
+ u32 hi, lo, e, i;
+
+ impl->intr_stat(gpio, &hi, &lo);
+
+ for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) {
+ if (hi & (1 << i))
+ e |= NVKM_GPIO_HI;
+ if (lo & (1 << i))
+ e |= NVKM_GPIO_LO;
+ nouveau_event_trigger(gpio->events, e, i);
+ }
+}
+
+int
+_nouveau_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+ const struct nouveau_gpio_impl *impl = (void *)object->oclass;
+ struct nouveau_gpio *gpio = nouveau_gpio(object);
+ u32 mask = (1 << impl->lines) - 1;
+
+ impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
+ impl->intr_stat(gpio, &mask, &mask);
+
+ return nouveau_subdev_fini(&gpio->base, suspend);
+}
+
+static struct dmi_system_id gpio_reset_ids[] = {
+ {
+ .ident = "Apple Macbook 10,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
+ }
+ },
+ { }
+};
+
+int
+_nouveau_gpio_init(struct nouveau_object *object)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(object);
+ int ret;
+
+ ret = nouveau_subdev_init(&gpio->base);
+ if (ret)
+ return ret;
+
+ if (gpio->reset && dmi_check_system(gpio_reset_ids))
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
+
+ return ret;
+}
+
void
_nouveau_gpio_dtor(struct nouveau_object *object)
{
@@ -113,9 +190,10 @@ _nouveau_gpio_dtor(struct nouveau_object *object)
int
nouveau_gpio_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int lines,
+ struct nouveau_oclass *oclass,
int length, void **pobject)
{
+ const struct nouveau_gpio_impl *impl = (void *)oclass;
struct nouveau_gpio *gpio;
int ret;
@@ -125,34 +203,34 @@ nouveau_gpio_create_(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_event_create(lines, &gpio->events);
- if (ret)
- return ret;
-
gpio->find = nouveau_gpio_find;
gpio->set = nouveau_gpio_set;
gpio->get = nouveau_gpio_get;
+ gpio->reset = impl->reset;
+
+ ret = nouveau_event_create(2, impl->lines, &gpio->events);
+ if (ret)
+ return ret;
+
+ gpio->events->priv = gpio;
+ gpio->events->enable = nouveau_gpio_intr_enable;
+ gpio->events->disable = nouveau_gpio_intr_disable;
+ nv_subdev(gpio)->intr = nouveau_gpio_intr;
return 0;
}
-static struct dmi_system_id gpio_reset_ids[] = {
- {
- .ident = "Apple Macbook 10,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
- }
- },
- { }
-};
-
int
-nouveau_gpio_init(struct nouveau_gpio *gpio)
+_nouveau_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- int ret = nouveau_subdev_init(&gpio->base);
- if (ret == 0 && gpio->reset) {
- if (dmi_check_system(gpio_reset_ids))
- gpio->reset(gpio, DCB_GPIO_UNUSED);
- }
- return ret;
+ struct nouveau_gpio *gpio;
+ int ret;
+
+ ret = nouveau_gpio_create(parent, engine, oclass, &gpio);
+ *pobject = nv_object(gpio);
+ if (ret)
+ return ret;
+
+ return 0;
}
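The refactor above moves drive()/sense() out of per-instance function pointers and into the shared class structure, recovered at each call by casting the object's oclass pointer. A minimal sketch of that dispatch shape, with hypothetical names and -1 standing in for -ENODEV:

#include <stdio.h>

struct gpio_obj;

/* Per-class implementation table: one instance per chipset family,
 * shared by every object of that class. */
struct gpio_impl {
	const char *name;
	int (*sense)(struct gpio_obj *, int line);
};

/* Per-instance object carries only a pointer back to its class. */
struct gpio_obj {
	const struct gpio_impl *oclass;
};

static int
fake_sense(struct gpio_obj *obj, int line)
{
	return line & 1;	/* pretend odd lines read high */
}

static const struct gpio_impl fake_impl = {
	.name = "fake",
	.sense = fake_sense,
};

/* Mirrors nouveau_gpio_sense(): fetch the impl from the object's
 * class and dispatch, returning an error if the hook is absent. */
static int
gpio_sense(struct gpio_obj *obj, int line)
{
	const struct gpio_impl *impl = obj->oclass;
	return impl->sense ? impl->sense(obj, line) : -1;
}

int
main(void)
{
	struct gpio_obj obj = { .oclass = &fake_impl };
	printf("line 3 -> %d\n", gpio_sense(&obj, 3));
	return 0;
}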
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
index 76d5d5465ddd..27ad23eaf185 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -26,10 +26,6 @@
#include "priv.h"
-struct nv10_gpio_priv {
- struct nouveau_gpio base;
-};
-
static int
nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
{
@@ -83,95 +79,38 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
}
static void
-nv10_gpio_intr(struct nouveau_subdev *subdev)
-{
- struct nv10_gpio_priv *priv = (void *)subdev;
- u32 intr = nv_rd32(priv, 0x001104);
- u32 hi = (intr & 0x0000ffff) >> 0;
- u32 lo = (intr & 0xffff0000) >> 16;
- int i;
-
- for (i = 0; (hi | lo) && i < 32; i++) {
- if ((hi | lo) & (1 << i))
- nouveau_event_trigger(priv->base.events, i);
- }
-
- nv_wr32(priv, 0x001104, intr);
-}
-
-static void
-nv10_gpio_intr_enable(struct nouveau_event *event, int line)
-{
- nv_wr32(event->priv, 0x001104, 0x00010001 << line);
- nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
-}
-
-static void
-nv10_gpio_intr_disable(struct nouveau_event *event, int line)
-{
- nv_wr32(event->priv, 0x001104, 0x00010001 << line);
- nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
-}
-
-static int
-nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nv10_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
{
- struct nv10_gpio_priv *priv;
- int ret;
-
- ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.drive = nv10_gpio_drive;
- priv->base.sense = nv10_gpio_sense;
- priv->base.events->priv = priv;
- priv->base.events->enable = nv10_gpio_intr_enable;
- priv->base.events->disable = nv10_gpio_intr_disable;
- nv_subdev(priv)->intr = nv10_gpio_intr;
- return 0;
+ u32 intr = nv_rd32(gpio, 0x001104);
+ u32 stat = nv_rd32(gpio, 0x001144) & intr;
+ *lo = (stat & 0xffff0000) >> 16;
+ *hi = (stat & 0x0000ffff);
+ nv_wr32(gpio, 0x001104, intr);
}
static void
-nv10_gpio_dtor(struct nouveau_object *object)
-{
- struct nv10_gpio_priv *priv = (void *)object;
- nouveau_gpio_destroy(&priv->base);
-}
-
-static int
-nv10_gpio_init(struct nouveau_object *object)
-{
- struct nv10_gpio_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_gpio_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x001144, 0x00000000);
- nv_wr32(priv, 0x001104, 0xffffffff);
- return 0;
-}
-
-static int
-nv10_gpio_fini(struct nouveau_object *object, bool suspend)
+nv10_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
{
- struct nv10_gpio_priv *priv = (void *)object;
- nv_wr32(priv, 0x001144, 0x00000000);
- return nouveau_gpio_fini(&priv->base, suspend);
+ u32 inte = nv_rd32(gpio, 0x001144);
+ if (type & NVKM_GPIO_LO)
+ inte = (inte & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte = (inte & ~mask) | data;
+ nv_wr32(gpio, 0x001144, inte);
}
-struct nouveau_oclass
-nv10_gpio_oclass = {
- .handle = NV_SUBDEV(GPIO, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_gpio_ctor,
- .dtor = nv10_gpio_dtor,
- .init = nv10_gpio_init,
- .fini = nv10_gpio_fini,
+struct nouveau_oclass *
+nv10_gpio_oclass = &(struct nouveau_gpio_impl) {
+ .base.handle = NV_SUBDEV(GPIO, 0x10),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpio_ctor,
+ .dtor = _nouveau_gpio_dtor,
+ .init = _nouveau_gpio_init,
+ .fini = _nouveau_gpio_fini,
},
-};
+ .lines = 16,
+ .intr_stat = nv10_gpio_intr_stat,
+ .intr_mask = nv10_gpio_intr_mask,
+ .drive = nv10_gpio_drive,
+ .sense = nv10_gpio_sense,
+}.base;
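nv10 (like nv50) packs the per-line rising and falling interrupt enables into the low and high 16-bit halves of one register, so intr_mask() updates either half under the same line mask. A standalone sketch of that read-modify-write, with the hardware register replaced by a plain variable and hypothetical constant names:

#include <stdio.h>
#include <stdint.h>

#define GPIO_HI      1
#define GPIO_LO      2
#define GPIO_TOGGLED (GPIO_HI | GPIO_LO)

/* Stand-in for the 0x001144 interrupt-enable register: HI enables in
 * bits 0..15, LO enables in bits 16..31, one bit per GPIO line. */
static uint32_t inte;

static void
intr_mask(uint32_t type, uint32_t mask, uint32_t data)
{
	if (type & GPIO_LO)
		inte = (inte & ~(mask << 16)) | (data << 16);
	if (type & GPIO_HI)
		inte = (inte & ~mask) | data;
}

int
main(void)
{
	/* enable both edges on line 3, then disable the LO edge */
	intr_mask(GPIO_TOGGLED, 1 << 3, 1 << 3);
	printf("inte = 0x%08x\n", inte);	/* 0x00080008 */
	intr_mask(GPIO_LO, 1 << 3, 0);
	printf("inte = 0x%08x\n", inte);	/* 0x00000008 */
	return 0;
}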
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index 2ef774731629..1864fa98e6b1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -24,15 +24,10 @@
#include "priv.h"
-struct nv50_gpio_priv {
- struct nouveau_gpio base;
-};
-
-static void
+void
nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
- struct nv50_gpio_priv *priv = (void *)gpio;
u8 ver, len;
u16 entry;
int ent = -1;
@@ -55,7 +50,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
gpio->set(gpio, 0, func, line, defs);
- nv_mask(priv, reg, 0x00010001 << lsh, val << lsh);
+ nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
}
}
@@ -72,7 +67,7 @@ nv50_gpio_location(int line, u32 *reg, u32 *shift)
return 0;
}
-static int
+int
nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
{
u32 reg, shift;
@@ -84,7 +79,7 @@ nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
return 0;
}
-static int
+int
nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
{
u32 reg, shift;
@@ -95,119 +90,40 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
return !!(nv_rd32(gpio, reg) & (4 << shift));
}
-void
-nv50_gpio_intr(struct nouveau_subdev *subdev)
-{
- struct nv50_gpio_priv *priv = (void *)subdev;
- u32 intr0, intr1 = 0;
- u32 hi, lo;
- int i;
-
- intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
- if (nv_device(priv)->chipset > 0x92)
- intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
-
- hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- lo = (intr0 >> 16) | (intr1 & 0xffff0000);
-
- for (i = 0; (hi | lo) && i < 32; i++) {
- if ((hi | lo) & (1 << i))
- nouveau_event_trigger(priv->base.events, i);
- }
-
- nv_wr32(priv, 0xe054, intr0);
- if (nv_device(priv)->chipset > 0x92)
- nv_wr32(priv, 0xe074, intr1);
-}
-
-void
-nv50_gpio_intr_enable(struct nouveau_event *event, int line)
-{
- const u32 addr = line < 16 ? 0xe050 : 0xe070;
- const u32 mask = 0x00010001 << (line & 0xf);
- nv_wr32(event->priv, addr + 0x04, mask);
- nv_mask(event->priv, addr + 0x00, mask, mask);
-}
-
-void
-nv50_gpio_intr_disable(struct nouveau_event *event, int line)
-{
- const u32 addr = line < 16 ? 0xe050 : 0xe070;
- const u32 mask = 0x00010001 << (line & 0xf);
- nv_wr32(event->priv, addr + 0x04, mask);
- nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
-}
-
-static int
-nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_gpio_priv *priv;
- int ret;
-
- ret = nouveau_gpio_create(parent, engine, oclass,
- nv_device(parent)->chipset > 0x92 ? 32 : 16,
- &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.reset = nv50_gpio_reset;
- priv->base.drive = nv50_gpio_drive;
- priv->base.sense = nv50_gpio_sense;
- priv->base.events->priv = priv;
- priv->base.events->enable = nv50_gpio_intr_enable;
- priv->base.events->disable = nv50_gpio_intr_disable;
- nv_subdev(priv)->intr = nv50_gpio_intr;
- return 0;
-}
-
-void
-nv50_gpio_dtor(struct nouveau_object *object)
-{
- struct nv50_gpio_priv *priv = (void *)object;
- nouveau_gpio_destroy(&priv->base);
-}
-
-int
-nv50_gpio_init(struct nouveau_object *object)
+static void
+nv50_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
{
- struct nv50_gpio_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_gpio_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable, and ack any pending gpio interrupts */
- nv_wr32(priv, 0xe050, 0x00000000);
- nv_wr32(priv, 0xe054, 0xffffffff);
- if (nv_device(priv)->chipset > 0x92) {
- nv_wr32(priv, 0xe070, 0x00000000);
- nv_wr32(priv, 0xe074, 0xffffffff);
- }
-
- return 0;
+ u32 intr = nv_rd32(gpio, 0x00e054);
+ u32 stat = nv_rd32(gpio, 0x00e050) & intr;
+ *lo = (stat & 0xffff0000) >> 16;
+ *hi = (stat & 0x0000ffff);
+ nv_wr32(gpio, 0x00e054, intr);
}
-int
-nv50_gpio_fini(struct nouveau_object *object, bool suspend)
+static void
+nv50_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
{
- struct nv50_gpio_priv *priv = (void *)object;
- nv_wr32(priv, 0xe050, 0x00000000);
- if (nv_device(priv)->chipset > 0x92)
- nv_wr32(priv, 0xe070, 0x00000000);
- return nouveau_gpio_fini(&priv->base, suspend);
+ u32 inte = nv_rd32(gpio, 0x00e050);
+ if (type & NVKM_GPIO_LO)
+ inte = (inte & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte = (inte & ~mask) | data;
+ nv_wr32(gpio, 0x00e050, inte);
}
-struct nouveau_oclass
-nv50_gpio_oclass = {
- .handle = NV_SUBDEV(GPIO, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_gpio_ctor,
- .dtor = nv50_gpio_dtor,
- .init = nv50_gpio_init,
- .fini = nv50_gpio_fini,
+struct nouveau_oclass *
+nv50_gpio_oclass = &(struct nouveau_gpio_impl) {
+ .base.handle = NV_SUBDEV(GPIO, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpio_ctor,
+ .dtor = _nouveau_gpio_dtor,
+ .init = _nouveau_gpio_init,
+ .fini = _nouveau_gpio_fini,
},
-};
+ .lines = 16,
+ .intr_stat = nv50_gpio_intr_stat,
+ .intr_mask = nv50_gpio_intr_mask,
+ .drive = nv50_gpio_drive,
+ .sense = nv50_gpio_sense,
+ .reset = nv50_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
new file mode 100644
index 000000000000..252083d376f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+void
+nv92_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
+{
+ u32 intr0 = nv_rd32(gpio, 0x00e054);
+ u32 intr1 = nv_rd32(gpio, 0x00e074);
+ u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
+ u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
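+ /* lines 0-15 live in the 0xe05x register pair and lines 16-31 in
+  * the 0xe07x pair; merge both into single 32-bit hi/lo masks
+  */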
+ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+ nv_wr32(gpio, 0x00e054, intr0);
+ nv_wr32(gpio, 0x00e074, intr1);
+}
+
+void
+nv92_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+ u32 inte0 = nv_rd32(gpio, 0x00e050);
+ u32 inte1 = nv_rd32(gpio, 0x00e070);
+ if (type & NVKM_GPIO_LO)
+ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
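+ /* move on to the bits for lines 16-31, handled by 0xe070 */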
+ mask >>= 16;
+ data >>= 16;
+ if (type & NVKM_GPIO_LO)
+ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte1 = (inte1 & ~mask) | data;
+ nv_wr32(gpio, 0x00e050, inte0);
+ nv_wr32(gpio, 0x00e070, inte1);
+}
+
+struct nouveau_oclass *
+nv92_gpio_oclass = &(struct nouveau_gpio_impl) {
+ .base.handle = NV_SUBDEV(GPIO, 0x92),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpio_ctor,
+ .dtor = _nouveau_gpio_dtor,
+ .init = _nouveau_gpio_init,
+ .fini = _nouveau_gpio_fini,
+ },
+ .lines = 32,
+ .intr_stat = nv92_gpio_intr_stat,
+ .intr_mask = nv92_gpio_intr_mask,
+ .drive = nv50_gpio_drive,
+ .sense = nv50_gpio_sense,
+ .reset = nv50_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 010431e3acec..a4682b0956ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -24,15 +24,10 @@
#include "priv.h"
-struct nvd0_gpio_priv {
- struct nouveau_gpio base;
-};
-
void
nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
- struct nvd0_gpio_priv *priv = (void *)gpio;
u8 ver, len;
u16 entry;
int ent = -1;
@@ -51,9 +46,9 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
gpio->set(gpio, 0, func, line, defs);
- nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0);
+ nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
if (unk1--)
- nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line);
+ nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
}
}
@@ -72,36 +67,19 @@ nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
}
-static int
-nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvd0_gpio_priv *priv;
- int ret;
-
- ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.reset = nvd0_gpio_reset;
- priv->base.drive = nvd0_gpio_drive;
- priv->base.sense = nvd0_gpio_sense;
- priv->base.events->priv = priv;
- priv->base.events->enable = nv50_gpio_intr_enable;
- priv->base.events->disable = nv50_gpio_intr_disable;
- nv_subdev(priv)->intr = nv50_gpio_intr;
- return 0;
-}
-
-struct nouveau_oclass
-nvd0_gpio_oclass = {
- .handle = NV_SUBDEV(GPIO, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_gpio_ctor,
- .dtor = nv50_gpio_dtor,
- .init = nv50_gpio_init,
- .fini = nv50_gpio_fini,
+struct nouveau_oclass *
+nvd0_gpio_oclass = &(struct nouveau_gpio_impl) {
+ .base.handle = NV_SUBDEV(GPIO, 0xd0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpio_ctor,
+ .dtor = _nouveau_gpio_dtor,
+ .init = _nouveau_gpio_init,
+ .fini = _nouveau_gpio_fini,
},
-};
+ .lines = 32,
+ .intr_stat = nv92_gpio_intr_stat,
+ .intr_mask = nv92_gpio_intr_mask,
+ .drive = nvd0_gpio_drive,
+ .sense = nvd0_gpio_sense,
+ .reset = nvd0_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
index 16b8c5bf5efa..e1145b48c76c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -24,108 +24,51 @@
#include "priv.h"
-struct nve0_gpio_priv {
- struct nouveau_gpio base;
-};
-
-void
-nve0_gpio_intr(struct nouveau_subdev *subdev)
+static void
+nve0_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
{
- struct nve0_gpio_priv *priv = (void *)subdev;
- u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
- u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
- u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- int i;
-
- for (i = 0; (hi | lo) && i < 32; i++) {
- if ((hi | lo) & (1 << i))
- nouveau_event_trigger(priv->base.events, i);
- }
-
- nv_wr32(priv, 0xdc00, intr0);
- nv_wr32(priv, 0xdc88, intr1);
+ u32 intr0 = nv_rd32(gpio, 0x00dc00);
+ u32 intr1 = nv_rd32(gpio, 0x00dc80);
+ u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
+ u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
+ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+ nv_wr32(gpio, 0x00dc00, intr0);
+ nv_wr32(gpio, 0x00dc80, intr1);
}
void
-nve0_gpio_intr_enable(struct nouveau_event *event, int line)
+nve0_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
{
- const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
- const u32 mask = 0x00010001 << (line & 0xf);
- nv_wr32(event->priv, addr + 0x08, mask);
- nv_mask(event->priv, addr + 0x00, mask, mask);
-}
-
-void
-nve0_gpio_intr_disable(struct nouveau_event *event, int line)
-{
- const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
- const u32 mask = 0x00010001 << (line & 0xf);
- nv_wr32(event->priv, addr + 0x08, mask);
- nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
-}
-
-int
-nve0_gpio_fini(struct nouveau_object *object, bool suspend)
-{
- struct nve0_gpio_priv *priv = (void *)object;
- nv_wr32(priv, 0xdc08, 0x00000000);
- nv_wr32(priv, 0xdc88, 0x00000000);
- return nouveau_gpio_fini(&priv->base, suspend);
-}
-
-int
-nve0_gpio_init(struct nouveau_object *object)
-{
- struct nve0_gpio_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_gpio_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0xdc00, 0xffffffff);
- nv_wr32(priv, 0xdc80, 0xffffffff);
- return 0;
-}
-
-void
-nve0_gpio_dtor(struct nouveau_object *object)
-{
- struct nve0_gpio_priv *priv = (void *)object;
- nouveau_gpio_destroy(&priv->base);
-}
-
-static int
-nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_gpio_priv *priv;
- int ret;
-
- ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.reset = nvd0_gpio_reset;
- priv->base.drive = nvd0_gpio_drive;
- priv->base.sense = nvd0_gpio_sense;
- priv->base.events->priv = priv;
- priv->base.events->enable = nve0_gpio_intr_enable;
- priv->base.events->disable = nve0_gpio_intr_disable;
- nv_subdev(priv)->intr = nve0_gpio_intr;
- return 0;
+ u32 inte0 = nv_rd32(gpio, 0x00dc08);
+ u32 inte1 = nv_rd32(gpio, 0x00dc88);
+ if (type & NVKM_GPIO_LO)
+ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+ mask >>= 16;
+ data >>= 16;
+ if (type & NVKM_GPIO_LO)
+ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+ if (type & NVKM_GPIO_HI)
+ inte1 = (inte1 & ~mask) | data;
+ nv_wr32(gpio, 0x00dc08, inte0);
+ nv_wr32(gpio, 0x00dc88, inte1);
}
-struct nouveau_oclass
-nve0_gpio_oclass = {
- .handle = NV_SUBDEV(GPIO, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_gpio_ctor,
- .dtor = nv50_gpio_dtor,
- .init = nve0_gpio_init,
- .fini = nve0_gpio_fini,
+struct nouveau_oclass *
+nve0_gpio_oclass = &(struct nouveau_gpio_impl) {
+ .base.handle = NV_SUBDEV(GPIO, 0xe0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpio_ctor,
+ .dtor = _nouveau_gpio_dtor,
+ .init = _nouveau_gpio_init,
+ .fini = _nouveau_gpio_fini,
},
-};
+ .lines = 32,
+ .intr_stat = nve0_gpio_intr_stat,
+ .intr_mask = nve0_gpio_intr_mask,
+ .drive = nvd0_gpio_drive,
+ .sense = nvd0_gpio_sense,
+ .reset = nvd0_gpio_reset,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
index 2ee1c895c782..e1724dfc86ae 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -3,15 +3,65 @@
#include <subdev/gpio.h>
-void nv50_gpio_dtor(struct nouveau_object *);
-int nv50_gpio_init(struct nouveau_object *);
-int nv50_gpio_fini(struct nouveau_object *, bool);
-void nv50_gpio_intr(struct nouveau_subdev *);
-void nv50_gpio_intr_enable(struct nouveau_event *, int line);
-void nv50_gpio_intr_disable(struct nouveau_event *, int line);
+#define nouveau_gpio_create(p,e,o,d) \
+ nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p) ({ \
+ struct nouveau_gpio *gpio = (p); \
+ _nouveau_gpio_dtor(nv_object(gpio)); \
+})
+#define nouveau_gpio_init(p) ({ \
+ struct nouveau_gpio *gpio = (p); \
+ _nouveau_gpio_init(nv_object(gpio)); \
+})
+#define nouveau_gpio_fini(p,s) ({ \
+ struct nouveau_gpio *gpio = (p); \
+ _nouveau_gpio_fini(nv_object(gpio), (s)); \
+})
+
+int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+int _nouveau_gpio_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void _nouveau_gpio_dtor(struct nouveau_object *);
+int _nouveau_gpio_init(struct nouveau_object *);
+int _nouveau_gpio_fini(struct nouveau_object *, bool);
+
+struct nouveau_gpio_impl {
+ struct nouveau_oclass base;
+ int lines;
+
+ /* read and ack pending interrupts, returning only data
+ * for lines that have not been masked off, while still
+ * performing the ack for anything that was pending.
+ */
+ void (*intr_stat)(struct nouveau_gpio *, u32 *, u32 *);
+
+ /* mask on/off interrupts for hi/lo transitions on a
+ * given set of gpio lines
+ */
+ void (*intr_mask)(struct nouveau_gpio *, u32, u32, u32);
+
+ /* configure gpio direction and output value */
+ int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
+
+ /* sense current state of given gpio line */
+ int (*sense)(struct nouveau_gpio *, int line);
+
+ /*XXX*/
+ void (*reset)(struct nouveau_gpio *, u8);
+};
+
+void nv50_gpio_reset(struct nouveau_gpio *, u8);
+int nv50_gpio_drive(struct nouveau_gpio *, int, int, int);
+int nv50_gpio_sense(struct nouveau_gpio *, int);
+
+void nv92_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
+void nv92_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
void nvd0_gpio_reset(struct nouveau_gpio *, u8);
int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
int nvd0_gpio_sense(struct nouveau_gpio *, int);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
index 4b195ac4da66..2c2731a6cf91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <subdev/i2c.h>
+#include "port.h"
struct anx9805_i2c_port {
struct nouveau_i2c_port base;
@@ -37,6 +37,8 @@ anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
u8 tmp, i;
+ DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
+
nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
@@ -60,21 +62,29 @@ anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
}
static int
-anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
+anx9805_aux(struct nouveau_i2c_port *port, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
{
struct anx9805_i2c_port *chan = (void *)port;
struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
int i, ret = -ETIMEDOUT;
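+ /* stage the (at most 16-byte) aux payload locally so the whole
+  * transfer can be dumped in one go with the %16ph format below
+  */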
+ u8 buf[16] = {};
u8 tmp;
+ DBG("%02x %05x %d\n", type, addr, size);
+
tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
- for (i = 0; !(type & 1) && i < size; i++)
- nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
+ if (!(type & 1)) {
+ memcpy(buf, data, size);
+ DBG("%16ph", buf);
+ for (i = 0; i < size; i++)
+ nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]);
+ }
nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
@@ -93,8 +103,13 @@ anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
goto done;
}
- for (i = 0; (type & 1) && i < size; i++)
- data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
+ if (type & 1) {
+ for (i = 0; i < size; i++)
+ buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
+ DBG("%16ph", buf);
+ memcpy(data, buf, size);
+ }
+
ret = 0;
done:
nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index 5de074ad170b..02eb42be2e9e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -22,15 +22,19 @@
* Authors: Ben Skeggs
*/
-#include <subdev/i2c.h>
+#include "priv.h"
int
nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
+ struct nouveau_i2c *i2c = nouveau_i2c(port);
if (port->func->aux) {
- if (port->func->acquire)
- port->func->acquire(port);
- return port->func->aux(port, 9, addr, data, size);
+ int ret = i2c->acquire(port, 0);
+ if (ret == 0) {
+ ret = port->func->aux(port, true, 9, addr, data, size);
+ i2c->release(port);
+ }
+ return ret;
}
return -ENODEV;
}
@@ -38,10 +42,14 @@ nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
int
nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
{
+ struct nouveau_i2c *i2c = nouveau_i2c(port);
if (port->func->aux) {
- if (port->func->acquire)
- port->func->acquire(port);
- return port->func->aux(port, 8, addr, data, size);
+ int ret = i2c->acquire(port, 0);
+ if (ret == 0) {
+ ret = port->func->aux(port, true, 8, addr, data, size);
+ i2c->release(port);
+ }
+ return ret;
}
return -ENODEV;
}
@@ -50,13 +58,16 @@ static int
aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct nouveau_i2c_port *port = adap->algo_data;
+ struct nouveau_i2c *i2c = nouveau_i2c(port);
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
if (!port->func->aux)
return -ENODEV;
- if ( port->func->acquire)
- port->func->acquire(port);
+
+ ret = i2c->acquire(port, 0);
+ if (ret)
+ return ret;
while (mcnt--) {
u8 remaining = msg->len;
@@ -74,9 +85,11 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = port->func->aux(port, cmd, msg->addr, ptr, cnt);
- if (ret < 0)
+ ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt);
+ if (ret < 0) {
+ i2c->release(port);
return ret;
+ }
ptr += cnt;
remaining -= cnt;
@@ -85,6 +98,7 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
msg++;
}
+ i2c->release(port);
return num;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 378e05b88e6f..09ba2cc851cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -23,13 +23,16 @@
*/
#include <core/option.h>
+#include <core/event.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/i2c.h>
-#include <subdev/i2c.h>
#include <subdev/vga.h>
+#include "priv.h"
+#include "pad.h"
+
/******************************************************************************
* interface to linux i2c bit-banging algorithm
*****************************************************************************/
@@ -45,9 +48,15 @@ nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
{
struct i2c_algo_bit_data *bit = adap->algo_data;
struct nouveau_i2c_port *port = bit->data;
- if (port->func->acquire)
- port->func->acquire(port);
- return 0;
+ return nouveau_i2c(port)->acquire(port, bit->timeout);
+}
+
+static void
+nouveau_i2c_post_xfer(struct i2c_adapter *adap)
+{
+ struct i2c_algo_bit_data *bit = adap->algo_data;
+ struct nouveau_i2c_port *port = bit->data;
+ return nouveau_i2c(port)->release(port);
}
static void
@@ -82,6 +91,15 @@ nouveau_i2c_getsda(void *data)
* base i2c "port" class implementation
*****************************************************************************/
+int
+_nouveau_i2c_port_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_i2c_port *port = (void *)object;
+ struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
+ nv_ofuncs(pad)->fini(nv_object(pad), suspend);
+ return nouveau_object_fini(&port->base, suspend);
+}
+
void
_nouveau_i2c_port_dtor(struct nouveau_object *object)
{
@@ -98,7 +116,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
const struct nouveau_i2c_func *func,
int size, void **pobject)
{
- struct nouveau_device *device = nv_device(parent);
+ struct nouveau_device *device = nv_device(engine);
struct nouveau_i2c *i2c = (void *)engine;
struct nouveau_i2c_port *port;
int ret;
@@ -113,8 +131,9 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
port->adapter.owner = THIS_MODULE;
port->adapter.dev.parent = nv_device_base(device);
port->index = index;
+ port->aux = -1;
port->func = func;
- i2c_set_adapdata(&port->adapter, i2c);
+ mutex_init(&port->mutex);
if ( algo == &nouveau_i2c_bit_algo &&
!nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
@@ -128,6 +147,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
bit->timeout = usecs_to_jiffies(2200);
bit->data = port;
bit->pre_xfer = nouveau_i2c_pre_xfer;
+ bit->post_xfer = nouveau_i2c_post_xfer;
bit->setsda = nouveau_i2c_setsda;
bit->setscl = nouveau_i2c_setscl;
bit->getsda = nouveau_i2c_getsda;
@@ -141,7 +161,6 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
ret = i2c_add_adapter(&port->adapter);
}
- /* drop port's i2c subdev refcount, i2c handles this itself */
if (ret == 0)
list_add_tail(&port->head, &i2c->ports);
return ret;
@@ -193,6 +212,75 @@ nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
return NULL;
}
+static void
+nouveau_i2c_release_pad(struct nouveau_i2c_port *port)
+{
+ struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
+ struct nouveau_i2c *i2c = nouveau_i2c(port);
+
+ if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
+ nv_ofuncs(pad)->fini(nv_object(pad), false);
+ wake_up_all(&i2c->wait);
+ }
+}
+
+static int
+nouveau_i2c_try_acquire_pad(struct nouveau_i2c_port *port)
+{
+ struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
+
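+ /* pads are shared: a second user may grab a busy pad only if it
+  * is (a child of) the port that already owns it
+  */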
+ if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
+ struct nouveau_object *owner = (void *)pad->port;
+ do {
+ if (owner == (void *)port)
+ return 0;
+ owner = owner->parent;
+ } while (owner);
+ nouveau_i2c_release_pad(port);
+ return -EBUSY;
+ }
+
+ pad->next = port;
+ nv_ofuncs(pad)->init(nv_object(pad));
+ return 0;
+}
+
+static int
+nouveau_i2c_acquire_pad(struct nouveau_i2c_port *port, unsigned long timeout)
+{
+ struct nouveau_i2c *i2c = nouveau_i2c(port);
+
+ if (timeout) {
+ if (wait_event_timeout(i2c->wait,
+ nouveau_i2c_try_acquire_pad(port) == 0,
+ timeout) == 0)
+ return -EBUSY;
+ } else {
+ wait_event(i2c->wait, nouveau_i2c_try_acquire_pad(port) == 0);
+ }
+
+ return 0;
+}
+
+static void
+nouveau_i2c_release(struct nouveau_i2c_port *port)
+__releases(pad->mutex)
+{
+ nouveau_i2c(port)->release_pad(port);
+ mutex_unlock(&port->mutex);
+}
+
+static int
+nouveau_i2c_acquire(struct nouveau_i2c_port *port, unsigned long timeout)
+__acquires(pad->mutex)
+{
+ int ret;
+ mutex_lock(&port->mutex);
+ if ((ret = nouveau_i2c(port)->acquire_pad(port, timeout)))
+ mutex_unlock(&port->mutex);
+ return ret;
+}
+
static int
nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
struct nouveau_i2c_board_info *info,
@@ -237,11 +325,59 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
return -ENODEV;
}
+static void
+nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
+{
+ struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
+ struct nouveau_i2c_port *port = i2c->find(i2c, index);
+ const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
+ if (port && port->aux >= 0)
+ impl->aux_mask(i2c, type, 1 << port->aux, 0);
+}
+
+static void
+nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index)
+{
+ struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
+ struct nouveau_i2c_port *port = i2c->find(i2c, index);
+ const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
+ if (port && port->aux >= 0)
+ impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
+}
+
+static void
+nouveau_i2c_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_i2c_impl *impl = (void *)nv_oclass(subdev);
+ struct nouveau_i2c *i2c = nouveau_i2c(subdev);
+ struct nouveau_i2c_port *port;
+ u32 hi, lo, rq, tx, e;
+
+ if (impl->aux_stat) {
+ impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
+ if (hi || lo || rq || tx) {
+ list_for_each_entry(port, &i2c->ports, head) {
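+ /* the comma operator clears 'e' for each port before testing
+  * whether the port has an aux channel at all
+  */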
+ if (e = 0, port->aux < 0)
+ continue;
+
+ if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
+ if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
+ if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
+ if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
+
+ nouveau_event_trigger(i2c->ntfy, e, port->index);
+ }
+ }
+ }
+}
+
int
_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
+ struct nouveau_i2c_impl *impl = (void *)nv_oclass(object);
struct nouveau_i2c *i2c = (void *)object;
struct nouveau_i2c_port *port;
+ u32 mask;
int ret;
list_for_each_entry(port, &i2c->ports, head) {
@@ -250,6 +386,11 @@ _nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
goto fail;
}
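+ /* mask off and ack any pending aux channel interrupts before the
+  * subdev is shut down
+  */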
+ if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
+ impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
+ impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
+ }
+
return nouveau_subdev_fini(&i2c->base, suspend);
fail:
list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
@@ -290,6 +431,8 @@ _nouveau_i2c_dtor(struct nouveau_object *object)
struct nouveau_i2c *i2c = (void *)object;
struct nouveau_i2c_port *port, *temp;
+ nouveau_event_destroy(&i2c->ntfy);
+
list_for_each_entry_safe(port, temp, &i2c->ports, head) {
nouveau_object_ref(NULL, (struct nouveau_object **)&port);
}
@@ -306,14 +449,14 @@ int
nouveau_i2c_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
- struct nouveau_oclass *sclass,
int length, void **pobject)
{
+ const struct nouveau_i2c_impl *impl = (void *)oclass;
struct nouveau_bios *bios = nouveau_bios(parent);
struct nouveau_i2c *i2c;
struct nouveau_object *object;
struct dcb_i2c_entry info;
- int ret, i, j, index = -1;
+ int ret, i, j, index = -1, pad;
struct dcb_output outp;
u8 ver, hdr;
u32 data;
@@ -324,24 +467,48 @@ nouveau_i2c_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ nv_subdev(i2c)->intr = nouveau_i2c_intr;
i2c->find = nouveau_i2c_find;
i2c->find_type = nouveau_i2c_find_type;
+ i2c->acquire_pad = nouveau_i2c_acquire_pad;
+ i2c->release_pad = nouveau_i2c_release_pad;
+ i2c->acquire = nouveau_i2c_acquire;
+ i2c->release = nouveau_i2c_release;
i2c->identify = nouveau_i2c_identify;
+ init_waitqueue_head(&i2c->wait);
INIT_LIST_HEAD(&i2c->ports);
while (!dcb_i2c_parse(bios, ++index, &info)) {
if (info.type == DCB_I2C_UNUSED)
continue;
- oclass = sclass;
+ if (info.share != DCB_I2C_UNUSED) {
+ if (info.type == DCB_I2C_NVIO_AUX)
+ pad = info.drive;
+ else
+ pad = info.share;
+ oclass = impl->pad_s;
+ } else {
+ pad = 0x100 + info.drive;
+ oclass = impl->pad_x;
+ }
+
+ ret = nouveau_object_ctor(NULL, *pobject, oclass,
+ NULL, pad, &parent);
+ if (ret < 0)
+ continue;
+
+ oclass = impl->sclass;
do {
ret = -EINVAL;
if (oclass->handle == info.type) {
- ret = nouveau_object_ctor(*pobject, *pobject,
+ ret = nouveau_object_ctor(parent, *pobject,
oclass, &info,
index, &object);
}
} while (ret && (++oclass)->handle);
+
+ nouveau_object_ref(NULL, &parent);
}
/* in addition to the busses specified in the i2c table, there
@@ -380,5 +547,28 @@ nouveau_i2c_create_(struct nouveau_object *parent,
}
}
+ ret = nouveau_event_create(4, index, &i2c->ntfy);
+ if (ret)
+ return ret;
+
+ i2c->ntfy->priv = i2c;
+ i2c->ntfy->enable = nouveau_i2c_intr_enable;
+ i2c->ntfy->disable = nouveau_i2c_intr_disable;
+ return 0;
+}
+
+int
+_nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_i2c *i2c;
+ int ret;
+
+ ret = nouveau_i2c_create(parent, engine, oclass, &i2c);
+ *pobject = nv_object(i2c);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
index a6e72d3b06b5..813ffc96e864 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "subdev/i2c.h"
+#include "priv.h"
#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
#define T_TIMEOUT 2200000
@@ -187,8 +187,9 @@ i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
- if (port->func->acquire)
- port->func->acquire(port);
+ ret = nouveau_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
+ if (ret)
+ return ret;
while (!ret && mcnt--) {
u8 remaining = msg->len;
@@ -210,6 +211,7 @@ i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
}
i2c_stop(port);
+ nouveau_i2c(port)->release(port);
return (ret < 0) ? ret : num;
}
#else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
new file mode 100644
index 000000000000..fa891c39866b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+gf117_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0xd7),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+ .sclass = nvd0_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &nv04_i2c_pad_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
index 860d5d2365da..b1725bdea967 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -22,9 +22,10 @@
* Authors: Ben Skeggs
*/
-#include <subdev/i2c.h>
#include <subdev/vga.h>
+#include "priv.h"
+
struct nv04_i2c_priv {
struct nouveau_i2c base;
};
@@ -115,29 +116,15 @@ nv04_i2c_sclass[] = {
{}
};
-static int
-nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv04_i2c_priv *priv;
- int ret;
-
- ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_oclass
-nv04_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_i2c_ctor,
+struct nouveau_oclass *
+nv04_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
.dtor = _nouveau_i2c_dtor,
.init = _nouveau_i2c_init,
.fini = _nouveau_i2c_fini,
},
-};
+ .sclass = nv04_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
index 0c2655a03bb4..f16c87ce5ba1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -22,9 +22,10 @@
* Authors: Ben Skeggs
*/
-#include <subdev/i2c.h>
#include <subdev/vga.h>
+#include "priv.h"
+
struct nv4e_i2c_priv {
struct nouveau_i2c base;
};
@@ -107,29 +108,15 @@ nv4e_i2c_sclass[] = {
{}
};
-static int
-nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv4e_i2c_priv *priv;
- int ret;
-
- ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_oclass
-nv4e_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x4e),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv4e_i2c_ctor,
+struct nouveau_oclass *
+nv4e_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0x4e),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
.dtor = _nouveau_i2c_dtor,
.init = _nouveau_i2c_init,
.fini = _nouveau_i2c_fini,
},
-};
+ .sclass = nv4e_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
index a8d67a287704..7b8756d4df08 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -121,29 +121,15 @@ nv50_i2c_sclass[] = {
{}
};
-static int
-nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_i2c_priv *priv;
- int ret;
-
- ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_oclass
-nv50_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_i2c_ctor,
+struct nouveau_oclass *
+nv50_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
.dtor = _nouveau_i2c_dtor,
.init = _nouveau_i2c_init,
.fini = _nouveau_i2c_fini,
},
-};
+ .sclass = nv50_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
index 4e5ba48ebf5a..5d2a77421c74 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -1,7 +1,7 @@
#ifndef __NV50_I2C_H__
#define __NV50_I2C_H__
-#include <subdev/i2c.h>
+#include "priv.h"
struct nv50_i2c_priv {
struct nouveau_i2c base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index df6d3e4b68be..f59c3a255462 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -24,6 +24,36 @@
#include "nv50.h"
+void
+nv94_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
+{
+ u32 intr = nv_rd32(i2c, 0x00e06c);
+ u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
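+ /* one status nibble per aux channel: plug, unplug, irq and done */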
+ for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
+ if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
+ if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
+ if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
+ if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
+ }
+ nv_wr32(i2c, 0x00e06c, intr);
+}
+
+void
+nv94_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
+{
+ u32 temp = nv_rd32(i2c, 0x00e068), i;
+ for (i = 0; i < 8; i++) {
+ if (mask & (1 << i)) {
+ if (!(data & (1 << i))) {
+ temp &= ~(type << (i * 4));
+ continue;
+ }
+ temp |= type << (i * 4);
+ }
+ }
+ nv_wr32(i2c, 0x00e068, temp);
+}
+
#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
@@ -69,7 +99,8 @@ auxch_init(struct nouveau_i2c *aux, int ch)
}
int
-nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
+nv94_aux(struct nouveau_i2c_port *base, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
{
struct nouveau_i2c *aux = nouveau_i2c(base);
struct nv50_i2c_port *port = (void *)base;
@@ -105,9 +136,8 @@ nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
ctrl |= size - 1;
nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
- /* retry transaction a number of times on failure... */
- ret = -EREMOTEIO;
- for (retries = 0; retries < 32; retries++) {
+ /* (maybe) retry transaction a number of times on failure... */
+ for (retries = 0; !ret && retries < 32; retries++) {
/* reset, and delay a while if this is a retry */
nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
@@ -123,16 +153,21 @@ nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
udelay(1);
if (!timeout--) {
AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ ret = -EIO;
goto out;
}
} while (ctrl & 0x00010000);
+ ret = 1;
/* read status, and check if transaction completed ok */
stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
- if (!(stat & 0x000f0f00)) {
- ret = 0;
- break;
- }
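+ /* a reply of 0x8 or 0x2 zeroes ret so the loop retries (when
+  * allowed); any other outcome leaves ret non-zero, ending the
+  * loop, and the reply nibble is handed back to the caller
+  */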
+ if ((stat & 0x000f0000) == 0x00080000 ||
+ (stat & 0x000f0000) == 0x00020000)
+ ret = retry ? 0 : 1;
+ if ((stat & 0x00000100))
+ ret = -ETIMEDOUT;
+ if ((stat & 0x00000e00))
+ ret = -EIO;
AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
}
@@ -147,29 +182,11 @@ nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
out:
auxch_fini(aux, ch);
- return ret;
-}
-
-void
-nv94_i2c_acquire(struct nouveau_i2c_port *base)
-{
- struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
- struct nv50_i2c_port *port = (void *)base;
- if (port->ctrl) {
- nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
- nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
- }
-}
-
-void
-nv94_i2c_release(struct nouveau_i2c_port *base)
-{
+ return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
static const struct nouveau_i2c_func
nv94_i2c_func = {
- .acquire = nv94_i2c_acquire,
- .release = nv94_i2c_release,
.drive_scl = nv50_i2c_drive_scl,
.drive_sda = nv50_i2c_drive_sda,
.sense_scl = nv50_i2c_sense_scl,
@@ -206,8 +223,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
static const struct nouveau_i2c_func
nv94_aux_func = {
- .acquire = nv94_i2c_acquire,
- .release = nv94_i2c_release,
.aux = nv94_aux,
};
@@ -227,6 +242,7 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ port->base.aux = info->drive;
port->addr = info->drive;
if (info->share != DCB_I2C_UNUSED) {
port->ctrl = 0x00e500 + (info->drive * 0x50);
@@ -257,29 +273,19 @@ nv94_i2c_sclass[] = {
{}
};
-static int
-nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_i2c_priv *priv;
- int ret;
-
- ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_oclass
-nv94_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0x94),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv94_i2c_ctor,
+struct nouveau_oclass *
+nv94_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0x94),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
.dtor = _nouveau_i2c_dtor,
.init = _nouveau_i2c_init,
.fini = _nouveau_i2c_fini,
},
-};
+ .sclass = nv94_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &nv94_i2c_pad_oclass,
+ .aux = 4,
+ .aux_stat = nv94_aux_stat,
+ .aux_mask = nv94_aux_mask,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index 29967d30f97c..364ddb1c5f03 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -42,8 +42,6 @@ nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
static const struct nouveau_i2c_func
nvd0_i2c_func = {
- .acquire = nv94_i2c_acquire,
- .release = nv94_i2c_release,
.drive_scl = nv50_i2c_drive_scl,
.drive_sda = nv50_i2c_drive_sda,
.sense_scl = nvd0_i2c_sense_scl,
@@ -75,7 +73,7 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static struct nouveau_oclass
+struct nouveau_oclass
nvd0_i2c_sclass[] = {
{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
.ofuncs = &(struct nouveau_ofuncs) {
@@ -96,29 +94,19 @@ nvd0_i2c_sclass[] = {
{}
};
-static int
-nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_i2c_priv *priv;
- int ret;
-
- ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_oclass
-nvd0_i2c_oclass = {
- .handle = NV_SUBDEV(I2C, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_i2c_ctor,
+struct nouveau_oclass *
+nvd0_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0xd0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
.dtor = _nouveau_i2c_dtor,
.init = _nouveau_i2c_init,
.fini = _nouveau_i2c_fini,
},
-};
+ .sclass = nvd0_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &nv94_i2c_pad_oclass,
+ .aux = 4,
+ .aux_stat = nv94_aux_stat,
+ .aux_mask = nv94_aux_mask,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
new file mode 100644
index 000000000000..cae77e1ad8dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static void
+nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
+{
+ u32 intr = nv_rd32(i2c, 0x00dc60);
+ u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
+ for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
+ if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
+ if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
+ if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
+ if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
+ }
+ nv_wr32(i2c, 0x00dc60, intr);
+}
+
+static void
+nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
+{
+ u32 temp = nv_rd32(i2c, 0x00dc68), i;
+ for (i = 0; i < 8; i++) {
+ if (mask & (1 << i)) {
+ if (!(data & (1 << i))) {
+ temp &= ~(type << (i * 4));
+ continue;
+ }
+ temp |= type << (i * 4);
+ }
+ }
+ nv_wr32(i2c, 0x00dc68, temp);
+}
+
+struct nouveau_oclass *
+nve0_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0xe0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+ .sclass = nvd0_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &nv94_i2c_pad_oclass,
+ .aux = 4,
+ .aux_stat = nve0_aux_stat,
+ .aux_mask = nve0_aux_mask,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c
new file mode 100644
index 000000000000..e9e412477c12
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "pad.h"
+
+int
+_nvkm_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvkm_i2c_pad *pad = (void *)object;
+ DBG("-> NULL\n");
+ pad->port = NULL;
+ return nouveau_object_fini(&pad->base, suspend);
+}
+
+int
+_nvkm_i2c_pad_init(struct nouveau_object *object)
+{
+ struct nvkm_i2c_pad *pad = (void *)object;
+ DBG("-> PORT:%02x\n", pad->next->index);
+ pad->port = pad->next;
+ return nouveau_object_init(&pad->base);
+}
+
+int
+nvkm_i2c_pad_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int index,
+ int size, void **pobject)
+{
+ struct nouveau_i2c *i2c = (void *)engine;
+ struct nouveau_i2c_port *port;
+ struct nvkm_i2c_pad *pad;
+ int ret;
+
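+ /* pads are shared between ports: if one with this index already
+  * exists, bump its refcount and hand it back (the positive return
+  * flags that an existing object was reused)
+  */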
+ list_for_each_entry(port, &i2c->ports, head) {
+ pad = nvkm_i2c_pad(port);
+ if (pad->index == index) {
+ atomic_inc(&nv_object(pad)->refcount);
+ *pobject = pad;
+ return 1;
+ }
+ }
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+ pad = *pobject;
+ if (ret)
+ return ret;
+
+ pad->index = index;
+ return 0;
+}
+
+int
+_nvkm_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nvkm_i2c_pad *pad;
+ int ret;
+ ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
+ *pobject = nv_object(pad);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h
new file mode 100644
index 000000000000..452ac10c3004
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/pad.h
@@ -0,0 +1,58 @@
+#ifndef __NVKM_I2C_PAD_H__
+#define __NVKM_I2C_PAD_H__
+
+#include "priv.h"
+
+struct nvkm_i2c_pad {
+ struct nouveau_object base;
+ int index;
+ struct nouveau_i2c_port *port;
+ struct nouveau_i2c_port *next;
+};
+
+static inline struct nvkm_i2c_pad *
+nvkm_i2c_pad(struct nouveau_i2c_port *port)
+{
+ struct nouveau_object *pad = nv_object(port);
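+ /* the pad is the topmost ancestor of any port created on it */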
+ while (pad->parent)
+ pad = pad->parent;
+ return (void *)pad;
+}
+
+#define nvkm_i2c_pad_create(p,e,o,i,d) \
+ nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
+#define nvkm_i2c_pad_destroy(p) ({ \
+ struct nvkm_i2c_pad *_p = (p); \
+ _nvkm_i2c_pad_dtor(nv_object(_p)); \
+})
+#define nvkm_i2c_pad_init(p) ({ \
+ struct nvkm_i2c_pad *_p = (p); \
+ _nvkm_i2c_pad_init(nv_object(_p)); \
+})
+#define nvkm_i2c_pad_fini(p,s) ({ \
+ struct nvkm_i2c_pad *_p = (p); \
+ _nvkm_i2c_pad_fini(nv_object(_p), (s)); \
+})
+
+int nvkm_i2c_pad_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int index, int, void **);
+
+int _nvkm_i2c_pad_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+#define _nvkm_i2c_pad_dtor nouveau_object_destroy
+int _nvkm_i2c_pad_init(struct nouveau_object *);
+int _nvkm_i2c_pad_fini(struct nouveau_object *, bool);
+
+#ifndef MSG
+#define MSG(l,f,a...) do { \
+ struct nvkm_i2c_pad *_pad = (void *)pad; \
+ nv_##l(nv_object(_pad)->engine, "PAD:%c:%02x: "f, \
+ _pad->index >= 0x100 ? 'X' : 'S', \
+ _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
+} while(0)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c
new file mode 100644
index 000000000000..2c4b61296dd1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv04.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "pad.h"
+
+struct nouveau_oclass
+nv04_i2c_pad_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_i2c_pad_ctor,
+ .dtor = _nvkm_i2c_pad_dtor,
+ .init = _nvkm_i2c_pad_init,
+ .fini = _nvkm_i2c_pad_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c
new file mode 100644
index 000000000000..0dc6753014f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padnv94.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "pad.h"
+
+struct nv94_i2c_pad {
+ struct nvkm_i2c_pad base;
+ int addr;
+};
+
+static int
+nv94_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_i2c *i2c = (void *)object->engine;
+ struct nv94_i2c_pad *pad = (void *)object;
+ nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
+ return nvkm_i2c_pad_fini(&pad->base, suspend);
+}
+
+static int
+nv94_i2c_pad_init(struct nouveau_object *object)
+{
+ struct nouveau_i2c *i2c = (void *)object->engine;
+ struct nv94_i2c_pad *pad = (void *)object;
+
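+ /* route the pad to aux or bit-banged i2c depending on the type
+  * of port that is about to use it
+  */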
+ switch (nv_oclass(pad->base.next)->handle) {
+ case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
+ nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002);
+ break;
+ case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+ default:
+ nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001);
+ break;
+ }
+
+ nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
+ return nvkm_i2c_pad_init(&pad->base);
+}
+
+static int
+nv94_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct nv94_i2c_pad *pad;
+ int ret;
+
+ ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
+ *pobject = nv_object(pad);
+ if (ret)
+ return ret;
+
+ pad->addr = index * 0x50;
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_i2c_pad_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_i2c_pad_ctor,
+ .dtor = _nvkm_i2c_pad_dtor,
+ .init = nv94_i2c_pad_init,
+ .fini = nv94_i2c_pad_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h
new file mode 100644
index 000000000000..a8ff6e077af5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/port.h
@@ -0,0 +1,15 @@
+#ifndef __NVKM_I2C_PORT_H__
+#define __NVKM_I2C_PORT_H__
+
+#include "priv.h"
+
+#ifndef MSG
+#define MSG(l,f,a...) do { \
+ struct nouveau_i2c_port *_port = (void *)port; \
+ nv_##l(nv_object(_port)->engine, "PORT:%02x: "f, _port->index, ##a); \
+} while(0)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
new file mode 100644
index 000000000000..780090b6425a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
@@ -0,0 +1,85 @@
+#ifndef __NVKM_I2C_H__
+#define __NVKM_I2C_H__
+
+#include <subdev/i2c.h>
+
+extern struct nouveau_oclass nv04_i2c_pad_oclass;
+extern struct nouveau_oclass nv94_i2c_pad_oclass;
+
+#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
+ nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
+ sizeof(**d), (void **)d)
+#define nouveau_i2c_port_destroy(p) ({ \
+ struct nouveau_i2c_port *port = (p); \
+ _nouveau_i2c_port_dtor(nv_object(i2c)); \
+})
+#define nouveau_i2c_port_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_i2c_port_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u8,
+ const struct i2c_algorithm *,
+ const struct nouveau_i2c_func *,
+ int, void **);
+void _nouveau_i2c_port_dtor(struct nouveau_object *);
+#define _nouveau_i2c_port_init nouveau_object_init
+int _nouveau_i2c_port_fini(struct nouveau_object *, bool);
+
+#define nouveau_i2c_create(p,e,o,d) \
+ nouveau_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_i2c_destroy(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_dtor(nv_object(i2c)); \
+})
+#define nouveau_i2c_init(p) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_init(nv_object(i2c)); \
+})
+#define nouveau_i2c_fini(p,s) ({ \
+ struct nouveau_i2c *i2c = (p); \
+ _nouveau_i2c_fini(nv_object(i2c), (s)); \
+})
+
+int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+int _nouveau_i2c_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void _nouveau_i2c_dtor(struct nouveau_object *);
+int _nouveau_i2c_init(struct nouveau_object *);
+int _nouveau_i2c_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nouveau_anx9805_sclass[];
+extern struct nouveau_oclass nvd0_i2c_sclass[];
+
+extern const struct i2c_algorithm nouveau_i2c_bit_algo;
+extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+
+struct nouveau_i2c_impl {
+ struct nouveau_oclass base;
+
+ /* supported i2c port classes */
+ struct nouveau_oclass *sclass;
+ struct nouveau_oclass *pad_x;
+ struct nouveau_oclass *pad_s;
+
+ /* number of native dp aux channels present */
+ int aux;
+
+ /* read and ack pending interrupts, returning only data
+ * for ports that have not been masked off, while still
+ * performing the ack for anything that was pending.
+ */
+ void (*aux_stat)(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
+
+ /* mask on/off interrupt types for a given set of auxch
+ */
+ void (*aux_mask)(struct nouveau_i2c *, u32, u32, u32);
+};
+
+void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
+void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c
new file mode 100644
index 000000000000..245f0ebaa6af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/gk20a.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/ibus.h>
+#include <subdev/timer.h>
+
+struct gk20a_ibus_priv {
+ struct nouveau_ibus base;
+};
+
+static void
+gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
+{
+ nv_mask(priv, 0x137250, 0x3f, 0);
+
+ nv_mask(priv, 0x000200, 0x20, 0);
+ usleep_range(20, 30);
+ nv_mask(priv, 0x000200, 0x20, 0x20);
+
+ nv_wr32(priv, 0x12004c, 0x4);
+ nv_wr32(priv, 0x122204, 0x2);
+ nv_rd32(priv, 0x122204);
+}
+
+static void
+gk20a_ibus_intr(struct nouveau_subdev *subdev)
+{
+ struct gk20a_ibus_priv *priv = (void *)subdev;
+ u32 status0 = nv_rd32(priv, 0x120058);
+
+ if (status0 & 0x7) {
+ nv_debug(priv, "resetting priv ring\n");
+ gk20a_ibus_init_priv_ring(priv);
+ }
+
+ /* Acknowledge interrupt */
+ nv_mask(priv, 0x12004c, 0x2, 0x2);
+
+ if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
+ nv_warn(priv, "timeout waiting for ringmaster ack\n");
+}
+
+static int
+gk20a_ibus_init(struct nouveau_object *object)
+{
+ struct gk20a_ibus_priv *priv = (void *)object;
+ int ret;
+
+ ret = _nouveau_ibus_init(object);
+ if (ret)
+ return ret;
+
+ gk20a_ibus_init_priv_ring(priv);
+
+ return 0;
+}
+
+static int
+gk20a_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gk20a_ibus_priv *priv;
+ int ret;
+
+ ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->intr = gk20a_ibus_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_ibus_oclass = {
+ .handle = NV_SUBDEV(IBUS, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_ibus_ctor,
+ .dtor = _nouveau_ibus_dtor,
+ .init = gk20a_ibus_init,
+ .fini = _nouveau_ibus_fini,
+ },
+};
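gk20a_ibus_intr follows the usual MMIO recovery shape: reset the priv ring on error, write the ack bit, then spin until the hardware clears its status and warn on timeout (nv_wait). A rough userspace sketch of that poll-with-timeout loop, with stubbed register accessors (the real nv_wait is bounded by the timer subdev rather than an iteration count):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;               /* stands in for the MMIO register */

static uint32_t reg_read(uint32_t addr) { (void)addr; return fake_reg; }

/* Poll until (reg & mask) == data or the iteration budget runs out;
 * the kernel version bounds this by wall-clock time instead.
 */
static bool wait_mask(uint32_t addr, uint32_t mask, uint32_t data)
{
	for (int i = 0; i < 100000; i++) {
		if ((reg_read(addr) & mask) == data)
			return true;
		fake_reg >>= 1;         /* simulate hw clearing its bits */
	}
	return false;
}

int main(void)
{
	fake_reg = 0x3f;
	if (!wait_mask(0x12004c, 0x3f, 0x00))
		fprintf(stderr, "timeout waiting for ringmaster ack\n");
	else
		printf("ack complete\n");
	return 0;
}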
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
index 7120124dceac..ebef970a0645 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -95,6 +95,23 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
}
static int
+nve0_ibus_init(struct nouveau_object *object)
+{
+ struct nve0_ibus_priv *priv = (void *)object;
+ int ret = nouveau_ibus_init(&priv->base);
+ if (ret == 0) {
+ nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
+ nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
+ nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
+ nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
+ nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
+ nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
+ nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
+ }
+ return ret;
+}
+
+static int
nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -117,7 +134,7 @@ nve0_ibus_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_ibus_ctor,
.dtor = _nouveau_ibus_dtor,
- .init = _nouveau_ibus_init,
+ .init = nve0_ibus_init,
.fini = _nouveau_ibus_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index e8822a934c48..9ca93e2718f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -26,6 +26,7 @@
const struct nouveau_mc_intr
nv50_mc_intr[] = {
+ { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
{ 0x00000001, NVDEV_ENGINE_MPEG },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
@@ -33,8 +34,8 @@ nv50_mc_intr[] = {
{ 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
{ 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x00200000, NVDEV_SUBDEV_GPIO },
- { 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
+ { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0002d101, NVDEV_SUBDEV_FB },
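The nv50_mc_intr[] table above (and the nv98/nvc0 variants below) is walked in order against the master interrupt status: each entry's bitmask is ANDed with the pending bits and the matching unit is serviced. Because the walk is ordered, moving the 0x04000000 DISP entry to the front makes display interrupts, and hence pageflip completion timestamps, be handled before FIFO work. A self-contained sketch of that table walk, with an invented subset of units:

#include <stdint.h>
#include <stdio.h>

struct mc_intr { uint32_t stat; const char *unit; };

/* order matters: DISP first so pageflip timestamps are taken early
 * (illustrative subset of the real table)
 */
static const struct mc_intr intr_map[] = {
	{ 0x04000000, "DISP" },
	{ 0x00000100, "FIFO" },
	{ 0x00001000, "GR"   },
	{ 0, NULL },
};

static void dispatch(uint32_t stat)
{
	for (const struct mc_intr *map = intr_map; map->stat; map++) {
		if (stat & map->stat) {
			printf("servicing %s\n", map->unit);
			stat &= ~map->stat;
		}
	}
	if (stat)
		printf("unknown intr 0x%08x\n", stat);
}

int main(void)
{
	dispatch(0x04000100);   /* DISP + FIFO pending: DISP handled first */
	return 0;
}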
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index f8a6f18e2d34..3c76d9038f38 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -26,6 +26,7 @@
static const struct nouveau_mc_intr
nv98_mc_intr[] = {
+ { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
{ 0x00000001, NVDEV_ENGINE_PPP },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
@@ -35,9 +36,9 @@ nv98_mc_intr[] = {
{ 0x00040000, NVDEV_SUBDEV_PWR }, /* NVA3:NVC0 */
{ 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x00200000, NVDEV_SUBDEV_GPIO },
+ { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
+ { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
- { 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0042d101, NVDEV_SUBDEV_FB },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 34472d317097..f9c6a678b47d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -26,6 +26,7 @@
const struct nouveau_mc_intr
nvc0_mc_intr[] = {
+ { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
{ 0x00000001, NVDEV_ENGINE_PPP },
{ 0x00000020, NVDEV_ENGINE_COPY0 },
{ 0x00000040, NVDEV_ENGINE_COPY1 },
@@ -37,10 +38,10 @@ nvc0_mc_intr[] = {
{ 0x00040000, NVDEV_SUBDEV_THERM },
{ 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x00200000, NVDEV_SUBDEV_GPIO },
+ { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
+ { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
{ 0x01000000, NVDEV_SUBDEV_PWR },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
- { 0x04000000, NVDEV_ENGINE_DISP },
{ 0x08000000, NVDEV_SUBDEV_FB },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
index 64f8b4702bf7..fcaabe8456e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -150,7 +150,7 @@ mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
* common example is DP->eDP.
*/
conn = bios->data;
- conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
+ conn += nvbios_connEe(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
type = conn[0];
switch (ctx.desc.conn_type) {
case 0x01: /* LVDS */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
index 2284ecb1c9b8..c2bb616a8da5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -83,7 +83,7 @@ host_send:
// increment GET
add b32 $r1 0x1
and $r14 $r1 #fifo_qmaskf
- nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+ nv_iowr(NV_PPWR_FIFO_GET(0), $r14)
bra #host_send
host_send_done:
ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 4bd43a99fdcc..39a5dc150a05 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -1018,7 +1018,7 @@ uint32_t nv108_pwr_code[] = {
0xb600023f,
0x1ec40110,
0x04b0400f,
- 0xbd0001f6,
+ 0xbd000ef6,
0xc70ef404,
/* 0x0328: host_send_done */
/* 0x032a: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 5a73fa620978..254205cd5166 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -1124,7 +1124,7 @@ uint32_t nva3_pwr_code[] = {
0x0f1ec401,
0x04b007f1,
0xd00604b6,
- 0x04bd0001,
+ 0x04bd000e,
/* 0x03cb: host_send_done */
0xf8ba0ef4,
/* 0x03cd: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 4dba00d2dd1a..7ac87405d01b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -1124,7 +1124,7 @@ uint32_t nvc0_pwr_code[] = {
0x0f1ec401,
0x04b007f1,
0xd00604b6,
- 0x04bd0001,
+ 0x04bd000e,
/* 0x03cb: host_send_done */
0xf8ba0ef4,
/* 0x03cd: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 5e24c6bc041d..cd9ff1a73284 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -1033,7 +1033,7 @@ uint32_t nvd0_pwr_code[] = {
0xb6026b21,
0x1ec40110,
0xb007f10f,
- 0x0001d004,
+ 0x000ed004,
0x0ef404bd,
/* 0x0365: host_send_done */
/* 0x0367: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 7610fc5f8fa2..ca9ad9fd47be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -60,9 +60,9 @@ static struct nouveau_i2c_board_info
nv_board_infos[] = {
{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
- { { I2C_BOARD_INFO("adt7473", 0x2e) }, 20 },
- { { I2C_BOARD_INFO("adt7473", 0x2d) }, 20 },
- { { I2C_BOARD_INFO("adt7473", 0x2c) }, 20 },
+ { { I2C_BOARD_INFO("adt7473", 0x2e) }, 40 },
+ { { I2C_BOARD_INFO("adt7473", 0x2d) }, 40 },
+ { { I2C_BOARD_INFO("adt7473", 0x2c) }, 40 },
{ { I2C_BOARD_INFO("f75375", 0x2e) }, 0 },
{ { I2C_BOARD_INFO("lm99", 0x4c) }, 0 },
{ { I2C_BOARD_INFO("lm90", 0x4c) }, 0 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index 3b2c4580098b..0478b2e3fb1d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -36,7 +36,7 @@ nva3_therm_fan_sense(struct nouveau_therm *therm)
u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
u32 ctrl = nv_rd32(therm, 0x00e720);
if (ctrl & 0x00000001)
- return tach * 60;
+ return tach * 60 / 2;
return -ENODEV;
}
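The nva3 change halves the reported fan speed: the tachometer register counts pulses per second, and the fix implies the fan emits two pulses per revolution, so RPM = pulses/s * 60 / 2. A tiny worked example under that assumption:

#include <stdio.h>

/* pulses-per-second -> RPM, assuming 2 tach pulses per revolution */
static unsigned fan_rpm(unsigned tach_hz)
{
	return tach_hz * 60 / 2;
}

int main(void)
{
	/* a reading of 40 pulses/s is 1200 RPM, not 2400 */
	printf("%u RPM\n", fan_rpm(40));
	return 0;
}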
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 434b920f6bd4..a96dda48718e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -414,7 +414,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index a2d669b4acf2..e57babb206d3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -477,7 +477,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 2f1ed61f7c8c..4342fdaee707 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -115,7 +115,7 @@ nv04_display_create(struct drm_device *dev)
&dev->mode_config.connector_list, head) {
if (!connector->encoder_ids[0]) {
NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->funcs->destroy(connector);
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 244822df8ffc..8667620b703a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -171,7 +171,8 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_tv_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index acef48f4a4ea..195bd8e86c6a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -612,8 +612,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(
- &nouveau_encoder_connector_get(nv_encoder)->base),
+ nouveau_encoder_connector_get(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d07ce028af51..1fa222e8f007 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -44,6 +44,7 @@
#include <subdev/i2c.h>
#include <subdev/gpio.h>
+#include <engine/disp.h>
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
static int nouveau_tv_disable = 0;
@@ -75,7 +76,8 @@ find_encoder(struct drm_connector *connector, int type)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
+ if (type == DCB_OUTPUT_ANY ||
+ (nv_encoder->dcb && nv_encoder->dcb->type == type))
return nv_encoder;
}
@@ -100,22 +102,24 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- nouveau_event_ref(NULL, &nv_connector->hpd_func);
+ nouveau_event_ref(NULL, &nv_connector->hpd);
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
+ if (nv_connector->aux.transfer)
+ drm_dp_aux_unregister(&nv_connector->aux);
kfree(connector);
}
-static struct nouveau_i2c_port *
-nouveau_connector_ddc_detect(struct drm_connector *connector,
- struct nouveau_encoder **pnv_encoder)
+static struct nouveau_encoder *
+nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- struct nouveau_i2c_port *port = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_mode_object *obj;
int i, panel = -ENODEV;
/* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -130,13 +134,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
}
}
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
- int id;
-
- id = connector->encoder_ids[i];
- if (!id)
+ for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ int id = connector->encoder_ids[i];
+ if (id == 0)
break;
obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
@@ -144,22 +144,24 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- port = nv_encoder->i2c;
- if (port && nv_probe_i2c(port, 0x50)) {
- *pnv_encoder = nv_encoder;
- break;
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ int ret = nouveau_dp_detect(nv_encoder);
+ if (ret == 0)
+ break;
+ } else
+ if (nv_encoder->i2c) {
+ if (nv_probe_i2c(nv_encoder->i2c, 0x50))
+ break;
}
-
- port = NULL;
}
/* eDP panel not detected, restore panel power GPIO to previous
* state to avoid confusing the SOR for other output types.
*/
- if (!port && panel == 0)
+ if (!nv_encoder && panel == 0)
gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
- return port;
+ return nv_encoder;
}
static struct nouveau_encoder *
@@ -258,25 +260,17 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES)
return conn_status;
- i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
- if (i2c) {
+ nv_encoder = nouveau_connector_ddc_detect(connector);
+ if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
drm_mode_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
- drm_get_connector_name(connector));
+ connector->name);
goto detect_analog;
}
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
- !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
- NV_ERROR(drm, "Detected %s, but failed init\n",
- drm_get_connector_name(connector));
- conn_status = connector_status_disconnected;
- goto out;
- }
-
/* Override encoder type for DVI-I based on whether EDID
* says the display is digital or analog, both use the
* same i2c channel so the value returned from ddc_detect
@@ -437,7 +431,7 @@ nouveau_connector_force(struct drm_connector *connector)
nv_encoder = find_encoder(connector, type);
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->status = connector_status_disconnected;
return;
}
@@ -912,33 +906,103 @@ nouveau_connector_funcs_lvds = {
};
static void
+nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
+{
+ struct nouveau_encoder *nv_encoder = NULL;
+
+ if (connector->encoder)
+ nv_encoder = nouveau_encoder(connector->encoder);
+ if (nv_encoder && nv_encoder->dcb &&
+ nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ if (mode == DRM_MODE_DPMS_ON) {
+ u8 data = DP_SET_POWER_D0;
+ nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+ usleep_range(1000, 2000);
+ } else {
+ u8 data = DP_SET_POWER_D3;
+ nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+ }
+ }
+
+ drm_helper_connector_dpms(connector, mode);
+}
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs_dp = {
+ .dpms = nouveau_connector_dp_dpms,
+ .save = NULL,
+ .restore = NULL,
+ .detect = nouveau_connector_detect,
+ .destroy = nouveau_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = nouveau_connector_set_property,
+ .force = nouveau_connector_force
+};
+
+static void
nouveau_connector_hotplug_work(struct work_struct *work)
{
struct nouveau_connector *nv_connector =
- container_of(work, struct nouveau_connector, hpd_work);
+ container_of(work, typeof(*nv_connector), work);
struct drm_connector *connector = &nv_connector->base;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
- bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ const char *name = connector->name;
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector));
+ if (nv_connector->status & NVKM_HPD_IRQ) {
+ } else {
+ bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG);
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- drm_helper_hpd_irq_event(dev);
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_hpd_irq_event(connector->dev);
+ }
+
+ nouveau_event_get(nv_connector->hpd);
}
static int
-nouveau_connector_hotplug(void *data, int index)
+nouveau_connector_hotplug(void *data, u32 type, int index)
{
struct nouveau_connector *nv_connector = data;
- schedule_work(&nv_connector->hpd_work);
- return NVKM_EVENT_KEEP;
+ nv_connector->status = type;
+ schedule_work(&nv_connector->work);
+ return NVKM_EVENT_DROP;
+}
+
+static ssize_t
+nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct nouveau_connector *nv_connector =
+ container_of(aux, typeof(*nv_connector), aux);
+ struct nouveau_encoder *nv_encoder;
+ struct nouveau_i2c_port *port;
+ int ret;
+
+ nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
+ if (!nv_encoder || !(port = nv_encoder->i2c))
+ return -ENODEV;
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+ if (msg->size == 0)
+ return msg->size;
+
+ ret = nouveau_i2c(port)->acquire(port, 0);
+ if (ret)
+ return ret;
+
+ ret = port->func->aux(port, false, msg->request, msg->address,
+ msg->buffer, msg->size);
+ nouveau_i2c(port)->release(port);
+ if (ret >= 0) {
+ msg->reply = ret;
+ return msg->size;
+ }
+
+ return ret;
}
static int
@@ -974,9 +1038,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct drm_connector *connector;
int type, ret = 0;
bool dummy;
@@ -992,33 +1056,15 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
connector = &nv_connector->base;
- INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
nv_connector->index = index;
/* attempt to parse vbios connector type and hotplug gpio */
nv_connector->dcb = olddcb_conn(dev, index);
if (nv_connector->dcb) {
- static const u8 hpd[16] = {
- 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
- };
-
u32 entry = ROM16(nv_connector->dcb[0]);
if (olddcb_conntab(dev)[3] >= 4)
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
- ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
- DCB_GPIO_UNUSED, &nv_connector->hpd);
- if (ret)
- nv_connector->hpd.func = DCB_GPIO_UNUSED;
-
- if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
- nouveau_event_new(gpio->events, nv_connector->hpd.line,
- nouveau_connector_hotplug,
- nv_connector,
- &nv_connector->hpd_func);
- }
-
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
DRM_MODE_CONNECTOR_Unknown) {
@@ -1040,7 +1086,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
} else {
nv_connector->type = DCB_CONNECTOR_NONE;
- nv_connector->hpd.func = DCB_GPIO_UNUSED;
}
/* no vbios data, or an unknown dcb connector type - attempt to
@@ -1080,8 +1125,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
}
- type = drm_conntype_from_dcb(nv_connector->type);
- if (type == DRM_MODE_CONNECTOR_LVDS) {
+ switch ((type = drm_conntype_from_dcb(nv_connector->type))) {
+ case DRM_MODE_CONNECTOR_LVDS:
ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
@@ -1090,8 +1135,23 @@ nouveau_connector_create(struct drm_device *dev, int index)
}
funcs = &nouveau_connector_funcs_lvds;
- } else {
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ nv_connector->aux.dev = dev->dev;
+ nv_connector->aux.transfer = nouveau_connector_aux_xfer;
+ ret = drm_dp_aux_register(&nv_connector->aux);
+ if (ret) {
+ NV_ERROR(drm, "failed to register aux channel\n");
+ kfree(nv_connector);
+ return ERR_PTR(ret);
+ }
+
+ funcs = &nouveau_connector_funcs_dp;
+ break;
+ default:
funcs = &nouveau_connector_funcs;
+ break;
}
/* defaults, will get overridden in detect() */
@@ -1166,10 +1226,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
+ ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index,
+ nouveau_connector_hotplug,
+ nv_connector, &nv_connector->hpd);
+ if (ret)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+ INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
+
drm_sysfs_connector_add(connector);
return connector;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 264a778f473b..8861b6c579ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,12 +28,12 @@
#define __NOUVEAU_CONNECTOR_H__
#include <drm/drm_edid.h>
+#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
#include <core/event.h>
#include <subdev/bios.h>
-#include <subdev/bios/gpio.h>
struct nouveau_i2c_port;
@@ -67,9 +67,11 @@ struct nouveau_connector {
u8 index;
u8 *dcb;
- struct dcb_gpio_func hpd;
- struct work_struct hpd_work;
- struct nouveau_eventh *hpd_func;
+ struct nouveau_eventh *hpd;
+ u32 status;
+ struct work_struct work;
+
+ struct drm_dp_aux aux;
int dithering_mode;
int dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index d1e5890784d7..a0534489d23f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -74,7 +74,7 @@ struct nouveau_crtc {
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
{
- return container_of(crtc, struct nouveau_crtc, base);
+ return crtc ? container_of(crtc, struct nouveau_crtc, base) : NULL;
}
static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index da764a4ed958..47ad74255bf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -42,7 +42,7 @@
#include <core/class.h>
static int
-nouveau_display_vblank_handler(void *data, int head)
+nouveau_display_vblank_handler(void *data, u32 type, int head)
{
struct nouveau_drm *drm = data;
drm_handle_vblank(drm->dev, head);
@@ -178,7 +178,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
return -ENOMEM;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
- ret = nouveau_event_new(pdisp->vblank, i,
+ ret = nouveau_event_new(pdisp->vblank, 1, i,
nouveau_display_vblank_handler,
drm, &disp->vblank[i]);
if (ret) {
@@ -393,7 +393,7 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd_func) nouveau_event_get(conn->hpd_func);
+ if (conn->hpd) nouveau_event_get(conn->hpd);
}
return ret;
@@ -408,7 +408,7 @@ nouveau_display_fini(struct drm_device *dev)
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd_func) nouveau_event_put(conn->hpd_func);
+ if (conn->hpd) nouveau_event_put(conn->hpd);
}
drm_kms_helper_poll_disable(dev);
@@ -736,6 +736,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
new_bo->bo.offset };
+ /* Keep vblanks on during flip, for the target crtc of this flip */
+ drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
@@ -779,6 +782,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
fail_unreserve:
+ drm_vblank_put(dev, nouveau_crtc(crtc)->index);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&chan->cli->mutex);
@@ -798,6 +802,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
struct drm_device *dev = drm->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
+ int crtcid = -1;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -808,8 +813,16 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
- if (s->event)
- drm_send_vblank_event(dev, s->crtc, s->event);
+ if (s->event) {
+ /* Vblank timestamps/counts are only correct on >= NV-50 */
+ if (nv_device(drm->device)->card_type >= NV_50)
+ crtcid = s->crtc;
+
+ drm_send_vblank_event(dev, crtcid, s->event);
+ }
+
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
list_del(&s->head);
if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 36fd22500569..5675ffc175ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -55,11 +55,10 @@ nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
}
-bool
-nouveau_dp_detect(struct drm_encoder *encoder)
+int
+nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_i2c_port *auxch;
u8 *dpcd = nv_encoder->dp.dpcd;
@@ -67,11 +66,11 @@ nouveau_dp_detect(struct drm_encoder *encoder)
auxch = nv_encoder->i2c;
if (!auxch)
- return false;
+ return -ENODEV;
ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
if (ret)
- return false;
+ return ret;
nv_encoder->dp.link_bw = 27000 * dpcd[1];
nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
@@ -91,6 +90,5 @@ nouveau_dp_detect(struct drm_encoder *encoder)
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
nouveau_dp_probe_oui(dev, auxch, dpcd);
-
- return true;
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ddd83756b9a2..5425ffe3931d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
ret = nouveau_do_resume(drm_dev);
if (ret)
return ret;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 0);
- nouveau_fbcon_zfill_all(drm_dev);
- if (drm_dev->mode_config.num_crtc)
+
+ if (drm_dev->mode_config.num_crtc) {
nouveau_display_resume(drm_dev);
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 24660c0f713d..5f0e37fc2849 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -46,6 +46,7 @@ struct nouveau_encoder {
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
+ u32 ctrl;
struct drm_display_mode mode;
int last_dpms;
@@ -84,9 +85,7 @@ get_slave_funcs(struct drm_encoder *enc)
}
/* nouveau_dp.c */
-bool nouveau_dp_detect(struct drm_encoder *);
-void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
- struct nouveau_object *);
+int nouveau_dp_detect(struct nouveau_encoder *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 64a42cfd3717..191665ee7f52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
if (state == 1)
nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 0)
+ if (state == 0) {
nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_zfill(dev, drm->fbcon);
+ }
console_unlock();
}
}
-
-void
-nouveau_fbcon_zfill_all(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- nouveau_fbcon_zfill(dev, drm->fbcon);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fdfc0c94fbcc..fcff797d2084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_zfill_all(struct drm_device *dev);
void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
void nouveau_fbcon_restore_accel(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 90074d620e31..ab5ea3b0d666 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -166,7 +166,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
}
static int
-nouveau_fence_wait_uevent_handler(void *data, int index)
+nouveau_fence_wait_uevent_handler(void *data, u32 type, int index)
{
struct nouveau_fence_priv *priv = data;
wake_up_all(&priv->waiting);
@@ -183,7 +183,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
struct nouveau_eventh *handler;
int ret = 0;
- ret = nouveau_event_new(pfifo->uevent, 0,
+ ret = nouveau_event_new(pfifo->uevent, 1, 0,
nouveau_fence_wait_uevent_handler,
priv, &handler);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index c1a7e5a73a26..462679a8fec5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -57,7 +57,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
return drm_compat_ioctl(filp, cmd, arg);
#if 0
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
if (fn != NULL)
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index fb84da3cb50d..4f4c3fec6916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -64,12 +64,13 @@ static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- bool can_switch;
- spin_lock(&dev->count_lock);
- can_switch = (dev->open_count == 0);
- spin_unlock(&dev->count_lock);
- return can_switch;
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 58af547b0b93..4c534b7b04da 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -957,7 +958,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
nv50_display_flip_stop(crtc);
- push = evo_wait(mast, 2);
+ push = evo_wait(mast, 6);
if (push) {
if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
@@ -1207,6 +1208,7 @@ static void
nv50_crtc_disable(struct drm_crtc *crtc)
{
struct nv50_head *head = nv50_head(crtc);
+ evo_sync(crtc->dev);
if (head->image)
nouveau_bo_unpin(head->image);
nouveau_bo_ref(NULL, &head->image);
@@ -1700,10 +1702,9 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
}
static void
-nv50_hdmi_disconnect(struct drm_encoder *encoder)
+nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
@@ -1722,7 +1723,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct nv50_disp *disp = nv50_disp(dev);
struct drm_encoder *partner;
- int or = nv_encoder->or;
+ u32 mthd;
nv_encoder->last_dpms = mode;
@@ -1740,7 +1741,18 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
- nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+ mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
+ mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+ mthd |= nv_encoder->or;
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1);
+ mthd |= NV94_DISP_SOR_DP_PWR;
+ } else {
+ mthd |= NV50_DISP_SOR_PWR;
+ }
+
+ nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON));
}
static bool
@@ -1764,33 +1776,36 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder,
}
static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
+nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
- evo_mthd(push, 0x0600 + (or * 0x40), 1);
- evo_data(push, 0x00000000);
- } else {
- evo_mthd(push, 0x0200 + (or * 0x20), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
+ u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
+ if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
+ evo_data(push, (nv_encoder->ctrl = temp));
+ } else {
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
+ evo_data(push, (nv_encoder->ctrl = temp));
}
-
- nv50_hdmi_disconnect(encoder);
+ evo_kick(push, mast);
}
+}
+
+static void
+nv50_sor_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
nv_encoder->crtc = NULL;
+
+ if (nv_crtc) {
+ nv50_crtc_prepare(&nv_crtc->base);
+ nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
+ nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
+ }
}
static void
@@ -1810,12 +1825,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 *push, lvds = 0;
+ u32 lvds = 0, mask, ctrl;
u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_encoder->crtc = encoder->crtc;
+
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
if (nv_encoder->dcb->sorconf.link & 1) {
@@ -1827,7 +1844,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
proto = 0x2;
}
- nv50_hdmi_mode_set(encoder, mode);
+ nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
@@ -1883,19 +1900,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
break;
}
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+ nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
- push = evo_wait(nv50_mast(dev), 8);
- if (push) {
- if (nv50_vers(mast) < NVD0_DISP_CLASS) {
- u32 ctrl = (depth << 16) | (proto << 8) | owner;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- ctrl |= 0x00001000;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- ctrl |= 0x00002000;
- evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
- evo_data(push, ctrl);
- } else {
+ if (nv50_vers(mast) >= NVD0_DISP_CLASS) {
+ u32 *push = evo_wait(mast, 3);
+ if (push) {
u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
u32 syncs = 0x00000001;
@@ -1910,14 +1919,21 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
evo_data(push, syncs | (depth << 6));
evo_data(push, magic);
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
- evo_data(push, owner | (proto << 8));
+ evo_kick(push, mast);
}
- evo_kick(push, mast);
+ ctrl = proto << 8;
+ mask = 0x00000f00;
+ } else {
+ ctrl = (depth << 16) | (proto << 8);
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl |= 0x00002000;
+ mask = 0x000f3f00;
}
- nv_encoder->crtc = encoder->crtc;
+ nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
}
static void
@@ -2295,7 +2311,7 @@ nv50_display_create(struct drm_device *dev)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
+ connector->name);
connector->funcs->destroy(connector);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index e3c47a8005ff..2d28dc337cfb 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -319,13 +319,13 @@ static void page_flip_worker(struct work_struct *work)
struct drm_display_mode *mode = &crtc->mode;
struct drm_gem_object *bo;
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
0, 0, mode->hdisplay, mode->vdisplay,
crtc->x << 16, crtc->y << 16,
mode->hdisplay << 16, mode->vdisplay << 16,
vblank_cb, crtc);
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->primary->fb, 0);
drm_gem_object_unreference_unlocked(bo);
@@ -465,7 +465,7 @@ static void apply_worker(struct work_struct *work)
* the callbacks and list modification all serialized
* with respect to modesetting ioctls from userspace.
*/
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
dispc_runtime_get();
/*
@@ -510,7 +510,7 @@ static void apply_worker(struct work_struct *work)
out:
dispc_runtime_put();
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
}
int omap_crtc_apply(struct drm_crtc *crtc,
@@ -518,7 +518,7 @@ int omap_crtc_apply(struct drm_crtc *crtc,
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- WARN_ON(!mutex_is_locked(&crtc->mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
/* no need to queue it again if it is already queued: */
if (apply->queued)
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index c8270e4b26f3..002b9721e85a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -588,9 +588,7 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- drm_modeset_lock_all(dev);
- ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
- drm_modeset_unlock_all(dev);
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
if (ret)
DBG("failed to restore crtc mode");
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8b019602ffe6..2a5cacdc344b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -346,6 +346,7 @@ void omap_framebuffer_flush(struct drm_framebuffer *fb,
VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+ /* FIXME: This is racy - no protection against modeset config changes. */
while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
/* only consider connectors that are part of a chain */
if (connector->encoder && connector->encoder->crtc) {
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index 1f1f8371a199..db1601fdbe29 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -27,6 +27,7 @@
#define MCS_ELVSS_ON 0xb1
#define MCS_USER_SETTING 0xf0
#define MCS_DISPCTL 0xf2
+#define MCS_POWER_CTRL 0xf4
#define MCS_GTCON 0xf7
#define MCS_PANEL_CONDITION 0xf8
#define MCS_GAMMA_SET1 0xf9
@@ -182,6 +183,8 @@ static void ld9040_init(struct ld9040 *ctx)
ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
0x02, 0x08, 0x08, 0x10, 0x10);
ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
+ ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
+ 0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
ld9040_brightness_set(ctx);
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 35941d2412b8..06e57a26db7a 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -847,6 +847,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
ctx->error = -EINVAL;
+ return;
}
ctx->variant = &s6e8aa0_variants[i];
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 309f29e9234a..a25136132c31 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -262,6 +262,13 @@ static int panel_simple_remove(struct device *dev)
return 0;
}
+static void panel_simple_shutdown(struct device *dev)
+{
+ struct panel_simple *panel = dev_get_drvdata(dev);
+
+ panel_simple_disable(&panel->base);
+}
+
static const struct drm_display_mode auo_b101aw03_mode = {
.clock = 51450,
.hdisplay = 1024,
@@ -284,6 +291,28 @@ static const struct panel_desc auo_b101aw03 = {
},
};
+static const struct drm_display_mode auo_b133xtn01_mode = {
+ .clock = 69500,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 20,
+ .vdisplay = 768,
+ .vsync_start = 768 + 3,
+ .vsync_end = 768 + 3 + 6,
+ .vtotal = 768 + 3 + 6 + 13,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b133xtn01 = {
+ .modes = &auo_b133xtn01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+};
+
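For fixed-mode panel entries such as auo_b133xtn01 above, the stated vrefresh should agree with clock / (htotal * vtotal); here 69,500 kHz / (1466 * 790) is roughly 60.0 Hz, matching .vrefresh = 60. A tiny verification helper:

#include <stdio.h>

/* refresh rate in Hz from a mode's pixel clock (kHz) and totals */
static double mode_vrefresh(unsigned clock_khz, unsigned htotal,
			    unsigned vtotal)
{
	return clock_khz * 1000.0 / (htotal * vtotal);
}

int main(void)
{
	/* auo_b133xtn01: htotal 1466, vtotal 790, clock 69500 kHz */
	printf("%.2f Hz\n", mode_vrefresh(69500, 1466, 790));   /* ~60.01 */
	return 0;
}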
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -328,6 +357,52 @@ static const struct panel_desc chunghwa_claa101wb01 = {
},
};
+static const struct drm_display_mode edt_et057090dhu_mode = {
+ .clock = 25175,
+ .hdisplay = 640,
+ .hsync_start = 640 + 16,
+ .hsync_end = 640 + 16 + 30,
+ .htotal = 640 + 16 + 30 + 114,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 3,
+ .vtotal = 480 + 10 + 3 + 32,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc edt_et057090dhu = {
+ .modes = &edt_et057090dhu_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 115,
+ .height = 86,
+ },
+};
+
+static const struct drm_display_mode edt_etm0700g0dh6_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 128,
+ .htotal = 800 + 40 + 128 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 2,
+ .vtotal = 480 + 10 + 2 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc edt_etm0700g0dh6 = {
+ .modes = &edt_etm0700g0dh6_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -377,12 +452,24 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b133xtn01",
+ .data = &auo_b133xtn01,
+ }, {
.compatible = "chunghwa,claa101wa01a",
.data = &chunghwa_claa101wa01a
}, {
.compatible = "chunghwa,claa101wb01",
.data = &chunghwa_claa101wb01
}, {
+ .compatible = "edt,et057090dhu",
+ .data = &edt_et057090dhu,
+ }, {
+ .compatible = "edt,et070080dh6",
+ .data = &edt_etm0700g0dh6,
+ }, {
+ .compatible = "edt,etm0700g0dh6",
+ .data = &edt_etm0700g0dh6,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
@@ -412,6 +499,11 @@ static int panel_simple_platform_remove(struct platform_device *pdev)
return panel_simple_remove(&pdev->dev);
}
+static void panel_simple_platform_shutdown(struct platform_device *pdev)
+{
+ panel_simple_shutdown(&pdev->dev);
+}
+
static struct platform_driver panel_simple_platform_driver = {
.driver = {
.name = "panel-simple",
@@ -420,6 +512,7 @@ static struct platform_driver panel_simple_platform_driver = {
},
.probe = panel_simple_platform_probe,
.remove = panel_simple_platform_remove,
+ .shutdown = panel_simple_platform_shutdown,
};
struct panel_desc_dsi {
@@ -561,6 +654,11 @@ static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
return panel_simple_remove(&dsi->dev);
}
+static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
+{
+ panel_simple_shutdown(&dsi->dev);
+}
+
static struct mipi_dsi_driver panel_simple_dsi_driver = {
.driver = {
.name = "panel-simple-dsi",
@@ -569,6 +667,7 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = {
},
.probe = panel_simple_dsi_probe,
.remove = panel_simple_dsi_remove,
+ .shutdown = panel_simple_dsi_shutdown,
};
static int __init panel_simple_init(void)
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 41bdd174657e..5d7ea2461852 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -574,6 +574,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
bo->surf.height, bo->surf.stride, bo->surf.format);
qxl_io_create_primary(qdev, base_offset, bo);
bo->is_primary = true;
+ }
+
+ if (bo->is_primary) {
+ DRM_DEBUG_KMS("setting surface_id to 0 for primary surface %d on crtc %d\n", bo->surface_id, qcrtc->index);
surf_id = 0;
} else {
surf_id = bo->surface_id;
@@ -841,7 +845,7 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
.save = qxl_conn_save,
.restore = qxl_conn_restore,
.detect = qxl_conn_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
.set_property = qxl_conn_set_property,
.destroy = qxl_conn_destroy,
};
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index fee8748bdca5..6e936634d65c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -214,7 +214,6 @@ static struct pci_driver qxl_pci_driver = {
static struct drm_driver qxl_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
- .dev_priv_size = 0,
.load = qxl_driver_load,
.unload = qxl_driver_unload,
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 0bb86e6d41b4..b110883f8253 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -451,4 +451,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_AUTH|DRM_UNLOCKED),
};
-int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
+int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 28f84b4fce32..34d6a85e9023 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -87,7 +87,7 @@ int qxl_irq_init(struct qxl_device *qdev)
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
- ret = drm_irq_install(qdev->ddev);
+ ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d52c27527b9a..71a1baeac14e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -109,13 +109,11 @@ static const struct vm_operations_struct *ttm_vm_ops;
static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
- struct qxl_device *qdev;
int r;
bo = (struct ttm_buffer_object *)vma->vm_private_data;
if (bo == NULL)
return VM_FAULT_NOPAGE;
- qdev = qxl_get_qdev(bo->bdev);
r = ttm_vm_ops->fault(vma, vmf);
return r;
}
@@ -162,10 +160,6 @@ static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct qxl_device *qdev;
-
- qdev = qxl_get_qdev(bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index b0d0fd3e4376..663f38c63ba6 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -203,7 +203,7 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index e806dacd452f..575e986f82a7 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1594,7 +1594,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
switch (param->param) {
case R128_PARAM_IRQ_NR:
- value = drm_dev_to_irq(dev);
+ value = dev->pdev->irq;
break;
default:
return -EINVAL;
@@ -1641,4 +1641,4 @@ const struct drm_ioctl_desc r128_ioctls[] = {
DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};
-int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
+int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
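Several hunks in this merge swap the old DRM_ARRAY_SIZE wrapper for the kernel-wide ARRAY_SIZE macro. The idiom divides the array's total size by the size of one element, which is only meaningful for true arrays, not pointers; the kernel version adds a build-time __must_be_array() check for exactly that reason. A userspace approximation:

#include <stdio.h>

/* element count of a true array; yields nonsense if given a pointer,
 * which is why the kernel's ARRAY_SIZE adds __must_be_array().
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const int ioctls[] = { 1, 2, 3, 4, 5 };

int main(void)
{
	printf("%zu ioctls\n", ARRAY_SIZE(ioctls));   /* prints 5 */
	return 0;
}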
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 09433534dc47..dbcbfe80aac0 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
- radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e911898348f8..a03c73411a56 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
+ u32 clock = mode->clock;
int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
@@ -632,6 +633,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
}
+ /* adjust pll for deep color modes */
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ break;
+ case 10:
+ clock = (clock * 5) / 4;
+ break;
+ case 12:
+ clock = (clock * 3) / 2;
+ break;
+ case 16:
+ clock = clock * 2;
+ break;
+ }
+ }
+
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
* special hw requirements.
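The deep-color adjustment above scales the pixel clock by the extra bits each pixel carries on the TMDS link: 10 bpc transmits 30 bits per pixel instead of 24 (x5/4), 12 bpc 36 bits (x3/2), and 16 bpc 48 bits (x2). For example, a 148.5 MHz 1080p mode at 12 bpc needs a 148.5 * 3 / 2 = 222.75 MHz clock. A standalone sketch of the same scaling, using a hypothetical helper name:

    /* sketch: HDMI deep color TMDS clock scaling by bits per component */
    static u32 hdmi_deep_color_clock(u32 clock, int bpc)
    {
            switch (bpc) {
            case 10: return clock * 5 / 4;  /* 30 bits/pixel vs. 24 */
            case 12: return clock * 3 / 2;  /* 36 bits/pixel */
            case 16: return clock * 2;      /* 48 bits/pixel */
            default: return clock;          /* 8 bpc and below: no scaling */
            }
    }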
@@ -653,7 +672,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
switch (crev) {
case 1:
case 2:
- args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.usPixelClock = cpu_to_le16(clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
@@ -665,7 +684,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
- args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
@@ -679,10 +698,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
- /* deep color support */
- args.v3.sInput.usPixelClock =
- cpu_to_le16((mode->clock * bpc / 8) / 10);
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -862,14 +877,21 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ /* yes this is correct, the atom define is wrong */
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+ break;
+ case 12:
+ /* yes this is correct, the atom define is wrong */
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ }
}
args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
@@ -884,20 +906,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
- break;
- case 12:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
- break;
- case 16:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+ break;
+ case 12:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+ break;
+ case 16:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ }
}
args.v6.ucTransmitterID = encoder_id;
args.v6.ucEncoderMode = encoder_mode;
@@ -938,6 +962,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
+
+ /* Assign mode clock for hdmi deep color max clock limit check */
+ radeon_connector->pixelclock_for_modeset = mode->clock;
radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
switch (encoder_mode) {
@@ -1019,10 +1046,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_clock = mode->clock;
+ u32 clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+ /* pass the actual clock to atombios_crtc_program_pll for DCE5 and DCE6 HDMI */
+ if (ASIC_IS_DCE5(rdev) &&
+ (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
+ (radeon_crtc->bpc > 8))
+ clock = radeon_crtc->adjusted_clock;
+
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
@@ -1057,7 +1091,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_crtc->crtc_id, &radeon_crtc->ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
- encoder_mode, radeon_encoder->encoder_id, mode->clock,
+ encoder_mode, radeon_encoder->encoder_id, clock,
ref_div, fb_div, frac_fb_div, post_div,
radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
@@ -1102,6 +1136,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
+ bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1140,33 +1175,73 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->bits_per_pixel) {
- case 8:
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
break;
- case 15:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_BGRA5551:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
break;
- case 16:
+ case DRM_FORMAT_RGB565:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
- case 24:
- case 32:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_BGRA1010102:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
default:
- DRM_ERROR("Unsupported screen depth %d\n",
- target_fb->bits_per_pixel);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format));
return -EINVAL;
}
@@ -1295,6 +1370,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+ /*
+ * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+ * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+ * retain the full precision throughout the pipeline.
+ */
+ WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
+ (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+ ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
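The gamma LUT has only 256 entries, indexed by 8-bit color components, so scanning a 10 bpc framebuffer through it would truncate every component to its 8 MSBs; hence the bypass for the 2101010 formats. WREG32_P is radeon's read-modify-write register helper; assuming the usual radeon.h definition, it behaves roughly like:

    /* sketch of WREG32_P(reg, val, mask): keep the bits in mask, OR in val */
    u32 tmp = RREG32(reg);
    tmp &= mask;            /* preserve the masked-in bits */
    tmp |= val & ~mask;     /* apply the new bits */
    WREG32(reg, tmp);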
@@ -1362,6 +1449,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
u32 tmp, viewport_w, viewport_h;
int r;
+ bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1399,18 +1487,30 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->bits_per_pixel) {
- case 8:
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
break;
- case 15:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+ AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
break;
- case 16:
+ case DRM_FORMAT_RGB565:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
@@ -1418,8 +1518,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
- case 24:
- case 32:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
@@ -1427,9 +1527,20 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+ AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
default:
- DRM_ERROR("Unsupported screen depth %d\n",
- target_fb->bits_per_pixel);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format));
return -EINVAL;
}
@@ -1468,6 +1579,13 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
if (rdev->family >= CHIP_R600)
WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+ /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
+ WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
+ (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 54e4f52549af..b1e11f8434e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -95,9 +95,12 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int recv_bytes;
+ int r = 0;
memset(&args, 0, sizeof(args));
+ mutex_lock(&chan->mutex);
+
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
radeon_atom_copy_swap(base, send, send_bytes, true);
@@ -117,19 +120,22 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
/* timeout */
if (args.v1.ucReplyStatus == 1) {
DRM_DEBUG_KMS("dp_aux_ch timeout\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto done;
}
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
- return -EBUSY;
+ r = -EIO;
+ goto done;
}
/* error */
if (args.v1.ucReplyStatus == 3) {
DRM_DEBUG_KMS("dp_aux_ch error\n");
- return -EIO;
+ r = -EIO;
+ goto done;
}
recv_bytes = args.v1.ucDataOutLen;
@@ -139,7 +145,11 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
if (recv && recv_size)
radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
- return recv_bytes;
+ r = recv_bytes;
+done:
+ mutex_unlock(&chan->mutex);
+
+ return r;
}
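The AUX path (and the hw i2c path further below) now serializes the whole atom table call behind a per-channel mutex, with every early return rewritten as a goto to a single unlock site. The shape of the pattern, as a minimal sketch:

    int r = 0;

    mutex_lock(&chan->mutex);
    if (error_condition) {          /* placeholder check */
            r = -EIO;
            goto done;
    }
    /* ... perform the transaction ... */
    done:
            mutex_unlock(&chan->mutex);
            return r;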
#define BARE_ADDRESS_SIZE 3
@@ -212,11 +222,12 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
- ret = drm_dp_aux_register_i2c_bus(&radeon_connector->ddc_bus->aux);
+
+ ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
if (!ret)
radeon_connector->ddc_bus->has_aux = true;
- WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+ WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
}
/***** general DP utility functions *****/
@@ -281,6 +292,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
/***** radeon specific DP functions *****/
+static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE])
+{
+ int max_link_rate;
+
+ if (radeon_connector_is_dp12_capable(connector))
+ max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+ else
+ max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+
+ return max_link_rate;
+}
+
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
@@ -290,7 +314,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@@ -328,7 +352,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
return 540000;
}
- return drm_dp_max_link_rate(dpcd);
+ return radeon_dp_get_max_link_rate(connector, dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -379,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
+
+ char dpcd_hex_dump[DP_DPCD_SIZE * 3];
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: ");
- for (i = 0; i < DP_DPCD_SIZE; i++)
- DRM_DEBUG_KMS("%02x ", msg[i]);
- DRM_DEBUG_KMS("\n");
+
+ hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
radeon_dp_probe_oui(radeon_connector);
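Instead of one DRM_DEBUG_KMS line per DPCD byte, hex_dump_to_buffer() renders the whole receiver capability block into a single string; every byte takes three characters ("xx "), which is why the buffer is sized DP_DPCD_SIZE * 3. A usage sketch with the same rowsize/groupsize parameters:

    u8 dpcd[15] = { 0x12, 0x14, 0xc4 /* ... */ };
    char buf[15 * 3];

    hex_dump_to_buffer(dpcd, sizeof(dpcd), 32, 1, buf, sizeof(buf), false);
    /* buf now holds e.g. "12 14 c4 00 00 ..." */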
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e6eb5097597f..2b2908440644 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1884,8 +1884,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- } else
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ } else {
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index b5162c3b6111..9c570fb15b8c 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -43,15 +43,19 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out = cpu_to_le16(0);
+ int r = 0;
memset(&args, 0, sizeof(args));
+ mutex_lock(&chan->mutex);
+
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
- return -EINVAL;
+ r = -EINVAL;
+ goto done;
}
if (buf == NULL)
args.ucRegIndex = 0;
@@ -65,7 +69,8 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- return -EINVAL;
+ r = -EINVAL;
+ goto done;
}
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
@@ -82,13 +87,17 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
- return -EIO;
+ r = -EIO;
+ goto done;
}
if (!(flags & HW_I2C_WRITE))
radeon_atom_copy_swap(buf, base, num, false);
- return 0;
+done:
+ mutex_unlock(&chan->mutex);
+
+ return r;
}
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 10dae4106c08..584090ac3eb9 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
tmp &= ~GLOBAL_PWRMGT_EN;
WREG32_SMC(GENERAL_PWRMGT, tmp);
- tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
tmp &= ~DYNAMIC_PM_EN;
WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index d2fd98968085..0b2471107137 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -80,6 +80,7 @@ extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
@@ -3257,7 +3258,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j;
+ int i, j, k;
switch (rdev->family) {
case CHIP_BONAIRE:
@@ -3446,6 +3447,15 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.max_sh_per_se,
rdev->config.cik.max_backends_per_se);
+ for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+ for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+ for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
+ rdev->config.cik.active_cus +=
+ hweight32(cik_get_cu_active_bitmap(rdev, i, j));
+ }
+ }
+ }
+
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
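active_cus is derived by popcounting the per-SE/per-SH compute-unit bitmaps; hweight32() is the kernel's 32-bit population count from <linux/bitops.h>. For instance:

    u32 bitmap = 0x0000000f;                  /* four units enabled */
    unsigned int active = hweight32(bitmap);  /* == 4 */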
@@ -3698,7 +3708,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
return true;
@@ -3818,7 +3828,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
radeon_ring_write(ring, next_rptr);
}
@@ -5396,6 +5406,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
@@ -5408,7 +5419,8 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ BANK_SELECT(4) |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(4));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -5444,6 +5456,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -7450,7 +7463,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -7476,7 +7489,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -7502,7 +7515,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_flip(rdev, 2);
+ radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
@@ -7528,7 +7541,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_flip(rdev, 3);
+ radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
@@ -7554,7 +7567,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_flip(rdev, 4);
+ radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
@@ -7580,7 +7593,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_flip(rdev, 5);
+ radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
@@ -7663,14 +7676,16 @@ restart_ih:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cik_vm_decode_fault(rdev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 167: /* VCE */
DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 72e464c79a88..8e9d0f1d858e 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -141,7 +141,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
next_rptr += 4;
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
radeon_ring_write(ring, 1); /* number of DWs to follow */
radeon_ring_write(ring, next_rptr);
}
@@ -151,7 +151,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
radeon_ring_write(ring, ib->length_dw);
}
@@ -203,8 +203,8 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
/* write the fence */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
+ radeon_ring_write(ring, upper_32_bits(addr));
radeon_ring_write(ring, fence->seq);
/* generate an interrupt */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
@@ -233,7 +233,7 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
radeon_ring_write(ring, addr & 0xfffffff8);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(addr));
return true;
}
@@ -551,10 +551,10 @@ int cik_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
radeon_ring_write(ring, cur_size_in_bytes);
radeon_ring_write(ring, 0); /* src/dst endian swap */
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(src_offset));
+ radeon_ring_write(ring, upper_32_bits(src_offset));
+ radeon_ring_write(ring, lower_32_bits(dst_offset));
+ radeon_ring_write(ring, upper_32_bits(dst_offset));
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
}
@@ -605,7 +605,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
}
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
radeon_ring_write(ring, 1); /* number of DWs to follow */
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
@@ -660,7 +660,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
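Masking upper_32_bits() with 0xffffffff was always a no-op, since the helper already returns a u32, and the open-coded `addr & 0xffffffff` is now spelled lower_32_bits() for symmetry. The helpers in include/linux/kernel.h are:

    #define lower_32_bits(n) ((u32)(n))
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

The double 16-bit shift avoids shifting a 32-bit value by its full width, which would be undefined behaviour.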
@@ -742,7 +742,26 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
trace_radeon_vm_set_page(pe, addr, count, incr, flags);
- if (flags & R600_PTE_SYSTEM) {
+ if (flags == R600_PTE_GART) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+ } else if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index dd7926394a8f..0c6e1b55d968 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,7 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -1751,12 +1752,12 @@
#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
#define EOP_TCL1_ACTION_EN (1 << 16)
#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
+#define EOP_TCL2_VOLATILE (1 << 24)
#define EOP_CACHE_POLICY(x) ((x) << 25)
/* 0 - LRU
* 1 - Stream
* 2 - Bypass
*/
-#define EOP_TCL2_VOLATILE (1 << 27)
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index aa908c55a513..e48a14037b76 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1050,7 +1050,7 @@ static const struct cs_extent_def SECT_CONTEXT_defs[] =
{SECT_CONTEXT_def_5, 0x0000a29e, 5 },
{SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
{SECT_CONTEXT_def_7, 0x0000a2de, 290 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const u32 SECT_CLEAR_def_1[] =
{
@@ -1061,7 +1061,7 @@ static const u32 SECT_CLEAR_def_1[] =
static const struct cs_extent_def SECT_CLEAR_defs[] =
{
{SECT_CLEAR_def_1, 0x0000ffc0, 3 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const u32 SECT_CTRLCONST_def_1[] =
{
@@ -1071,11 +1071,11 @@ static const u32 SECT_CTRLCONST_def_1[] =
static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def cayman_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
index c3982f9475fb..f55d06664e31 100644
--- a/drivers/gpu/drm/radeon/clearstate_ci.h
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -936,9 +936,9 @@ static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
{ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
{ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
{ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def ci_cs_data[] = {
{ ci_SECT_CONTEXT_defs, SECT_CONTEXT },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
index b994cb2a35a0..66e39cdb5cb0 100644
--- a/drivers/gpu/drm/radeon/clearstate_si.h
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -933,9 +933,9 @@ static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
{si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
{si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
{si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def si_cs_data[] = {
{ si_SECT_CONTEXT_defs, SECT_CONTEXT },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 5a9a5f4d7888..47d31e915758 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
new file mode 100644
index 000000000000..51800e340a57
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2014 Rafał Miłecki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ radeon_connector = to_radeon_connector(connector);
+ break;
+ }
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ if (sad->channels > max_channels) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ max_channels = sad->channels;
+ }
+
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ stereo_freqs |= sad->freq;
+ else
+ break;
+ }
+ }
+
+ value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
+ uint32_t offset;
+ ssize_t err;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silently return; r600_hdmi_enable will raise a WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
+ r600_audio_set_dto(encoder, mode->clock);
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
+
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ if (ASIC_IS_DCE32(rdev)) {
+ dce3_2_afmt_write_speaker_allocation(encoder);
+ dce3_2_afmt_write_sad_regs(encoder);
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+ HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
+
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they're quite useful for debugging */
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
+}
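dce3_2_afmt_write_sad_regs() above walks the EDID Short Audio Descriptors once per audio coding type and keeps, for each type, the descriptor with the largest channel count, while PCM descriptors additionally accumulate their sample-rate bits into the stereo-frequency field. The struct it consumes is the DRM EDID helper type (include/drm/drm_edid.h):

    struct cea_sad {
            u8 format;      /* HDMI_AUDIO_CODING_TYPE_* */
            u8 channels;    /* max number of channels - 1 */
            u8 freq;        /* supported sample-rate bitmask */
            u8 byte2;       /* meaning depends on format */
    };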
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0f7a51a3694f..f7ece0ff431b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002,
0x913c, 0x0000000f, 0x0000000a
};
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002
};
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
static const u32 supersumo_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
static const u32 wrestler_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
- 0x5cc, 0xffffffff, 0x00000001,
+ 0x5c4, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
@@ -1301,36 +1301,6 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
/**
- * radeon_irq_kms_pflip_irq_get - pre-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to prepare for pageflip on
- *
- * Pre-pageflip callback (evergreen+).
- * Enables the pageflip irq (vblank irq).
- */
-void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* enable the pflip int */
- radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-/**
- * evergreen_post_page_flip - pos-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to cleanup pageflip on
- *
- * Post-pageflip callback (evergreen+).
- * Disables the pageflip irq (vblank irq).
- */
-void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* disable the pflip int */
- radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-/**
* evergreen_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
@@ -1343,7 +1313,7 @@ void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
* double buffered update to take place.
* Returns the current update pending status.
*/
-u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -1375,9 +1345,23 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+/**
+ * evergreen_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Returns the current update pending status.
+ */
+bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+ return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
}
/* get temperature in millidegrees */
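The pageflip hook is split: *_page_flip() now just programs the new scanout base and returns void, while the new *_page_flip_pending() reports whether the double-buffered update is still outstanding (the `!!` forces a clean bool). A hypothetical caller would program the flip and then poll, roughly:

    /* hypothetical sketch; the real polling lives in radeon's flip worker */
    radeon_page_flip(rdev, crtc_id, crtc_base);
    while (radeon_page_flip_pending(rdev, crtc_id))
            usleep_range(1000, 2000);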
@@ -3353,6 +3337,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
disabled_rb_mask &= ~(1 << i);
}
+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+ u32 simd_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+ simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
+ tmp <<= 16;
+ tmp |= simd_disable_bitmap;
+ }
+ rdev->config.evergreen.active_simds = hweight32(~tmp);
+
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -4810,7 +4806,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -4836,7 +4832,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -4862,7 +4858,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_flip(rdev, 2);
+ radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
@@ -4888,7 +4884,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_flip(rdev, 3);
+ radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
@@ -4914,7 +4910,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_flip(rdev, 4);
+ radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
@@ -4940,7 +4936,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_flip(rdev, 5);
+ radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
@@ -5070,14 +5066,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cayman_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 05b0c95813fd..1ec0e6e83f9f 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -293,10 +293,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
uint32_t offset;
ssize_t err;
+ uint32_t val;
+ int bpc = 8;
if (!dig || !dig->afmt)
return;
@@ -306,6 +309,12 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
+ /* hdmi deep color mode general control packets setup, if bpc > 8 */
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
+
/* disable audio prior to setting up hw */
if (ASIC_IS_DCE6(rdev)) {
dig->afmt->pin = dce6_audio_get_pin(rdev);
@@ -322,6 +331,35 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+ val = RREG32(HDMI_CONTROL + offset);
+ val &= ~HDMI_DEEP_COLOR_ENABLE;
+ val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;
+
+ switch (bpc) {
+ case 0:
+ case 6:
+ case 8:
+ case 16:
+ default:
+ DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
+ connector->name, bpc);
+ break;
+ case 10:
+ val |= HDMI_DEEP_COLOR_ENABLE;
+ val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
+ DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
+ connector->name);
+ break;
+ case 12:
+ val |= HDMI_DEEP_COLOR_ENABLE;
+ val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
+ DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
+ connector->name);
+ break;
+ }
+
+ WREG32(HDMI_CONTROL + offset, val);
+
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND | /* send null packets when required */
HDMI_GC_SEND | /* send general control packets */
@@ -348,9 +386,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_SOURCE | /* select SW CTS value */
- HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
+ if (bpc > 8)
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+ else
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_SOURCE | /* select SW CTS value */
+ HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
evergreen_hdmi_update_ACR(encoder, mode->clock);
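For bpc > 8 the general-control packets now advertise the deep-color depth (HDMI_30BIT_DEEP_COLOR for 10 bpc, HDMI_36BIT_DEEP_COLOR for 12 bpc), and ACR generation switches from the software CTS value to hardware CTS: in deep color the TMDS clock runs faster than the pixel clock (x1.25 at 10 bpc, x1.5 at 12 bpc), so a CTS precomputed from mode->clock would misregenerate the audio clock. Condensed, the depth selection amounts to:

    val &= ~(HDMI_DEEP_COLOR_ENABLE | HDMI_DEEP_COLOR_DEPTH_MASK);
    if (bpc == 10)
            val |= HDMI_DEEP_COLOR_ENABLE |
                   HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
    else if (bpc == 12)
            val |= HDMI_DEEP_COLOR_ENABLE |
                   HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);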
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index a0f63ff5a5e9..333d143fca2c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -116,6 +116,8 @@
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x6808
+# define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f9c7963b3ee6..b066d6711b8d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -517,10 +517,11 @@
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
-# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
+# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
# define HDMI_24BIT_DEEP_COLOR 0
# define HDMI_30BIT_DEEP_COLOR 1
# define HDMI_36BIT_DEEP_COLOR 2
+# define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28)
#define HDMI_STATUS 0x7034
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 3f6e817d97ee..9ef8c38f2d66 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_ds = true;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
- pi->bapm_enable = false;
+ pi->bapm_enable = true;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index d246e043421a..5a33ca681867 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1057,6 +1057,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
disabled_rb_mask &= ~(1 << i);
}
+ for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
+ u32 simd_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+ simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
+ tmp <<= 16;
+ tmp |= simd_disable_bitmap;
+ }
+ rdev->config.cayman.active_simds = hweight32(~tmp);
+
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -1228,12 +1240,14 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ BANK_SELECT(6) |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1266,6 +1280,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -1343,7 +1358,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 004c931606c4..01fc4888e6fe 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index d996033c243e..2e12e4d69253 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -128,6 +128,7 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
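For reference, the new PAGE_TABLE_BLOCK_SIZE() macro packs a 4-bit value into bits 27:24 of VM_CONTEXT1_CNTL, and cayman_pcie_gart_enable() above programs it with radeon_vm_block_size - 9, i.e. the page-table bits beyond the old fixed 9. A small sketch of the packing; only the macro itself is taken from the patch, the default value is an assumption:

#include <stdint.h>
#include <stdio.h>

#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)

int main(void)
{
        int radeon_vm_block_size = 9; /* assumed driver default */
        uint32_t field = PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9);

        /* with the default block size the field stays 0 */
        printf("VM_CONTEXT1_CNTL bits 27:24 = 0x%08x\n", field);
        return 0;
}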
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b6c32640df20..1544efcf1c3a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -142,36 +142,6 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
/**
- * r100_pre_page_flip - pre-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to prepare for pageflip on
- *
- * Pre-pageflip callback (r1xx-r4xx).
- * Enables the pageflip irq (vblank irq).
- */
-void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* enable the pflip int */
- radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-/**
- * r100_post_page_flip - pos-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to cleanup pageflip on
- *
- * Post-pageflip callback (r1xx-r4xx).
- * Disables the pageflip irq (vblank irq).
- */
-void r100_post_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* disable the pflip int */
- radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-/**
* r100_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
@@ -182,9 +152,8 @@ void r100_post_page_flip(struct radeon_device *rdev, int crtc)
* During vblank we take the crtc lock and wait for the update_pending
* bit to go high, when it does, we release the lock, and allow the
* double buffered update to take place.
- * Returns the current update pending status.
*/
-u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
@@ -206,8 +175,24 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+}
+
+/**
+ * r100_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Check if the last pageflip is still pending (r1xx-r4xx).

+ * Returns the current update pending status.
+ */
+bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
/* Return current update_pending status: */
- return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+ return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
+ RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}
/**
@@ -697,15 +682,11 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
-int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr)
{
u32 *gtt = rdev->gart.ptr;
-
- if (i < 0 || i > rdev->gart.num_gpu_pages) {
- return -EINVAL;
- }
gtt[i] = cpu_to_le32(lower_32_bits(addr));
- return 0;
}
void r100_pci_gart_fini(struct radeon_device *rdev)
@@ -794,7 +775,7 @@ int r100_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[1]) {
@@ -803,7 +784,7 @@ int r100_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 206caf9700b7..3c21d77a483d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -72,13 +72,11 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr)
{
void __iomem *ptr = rdev->gart.ptr;
- if (i < 0 || i > rdev->gart.num_gpu_pages) {
- return -EINVAL;
- }
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24) |
R300_PTE_WRITEABLE | R300_PTE_READABLE;
@@ -86,7 +84,6 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
writel(addr, ((void __iomem *)ptr) + (i * 4));
- return 0;
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
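One way to read the PTE layout kept here: a 40-bit, 4KB-aligned address shifted right by 8 lands in bits 23:0 (low part) plus 31:24 (high byte), and since address bits 11:0 are zero the bottom nibble is free for the R/W flag bits. A hedged round-trip sketch under that reading (the unpack helper is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)

/* Sketch: rv370 PCIe GART PTE packing.  The flag bits occupy what
 * would be address bits 10:8, which a 4KB-aligned address leaves 0. */
static uint32_t pack_pte(uint64_t addr)
{
        return ((uint32_t)addr >> 8) |
               (((uint32_t)(addr >> 32) & 0xff) << 24) |
               R300_PTE_WRITEABLE | R300_PTE_READABLE;
}

static uint64_t unpack_addr(uint32_t pte)
{
        uint64_t addr = ((uint64_t)(pte >> 24) << 32) |      /* bits 39:32 */
                        ((uint64_t)(pte & 0x00ffffff) << 8); /* bits 31:8  */
        return addr & ~0xfffULL;  /* drop flag bits / page offset */
}

int main(void)
{
        uint64_t addr = 0xAB12345000ULL; /* hypothetical, 4KB aligned */
        uint32_t pte = pack_pte(addr);

        printf("pte=0x%08x addr back=0x%llx\n",
               pte, (unsigned long long)unpack_addr(pte)); /* 0xab12345000 */
        return 0;
}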
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 1dd0d32993d5..136b7bc7cd20 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -402,6 +402,7 @@
* block and vice versa. This applies to GRPH, CUR, etc.
*/
#define AVIVO_D1GRPH_LUT_SEL 0x6108
+# define AVIVO_LUT_10BIT_BYPASS_EN (1 << 8)
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bbc189fd3ddc..c66952d4b00c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1958,6 +1958,9 @@ static void r600_gpu_init(struct radeon_device *rdev)
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
+ tmp = rdev->config.r600.max_simds -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+ rdev->config.r600.active_simds = tmp;
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
@@ -2724,7 +2727,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
@@ -2763,7 +2766,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
return true;
@@ -2824,9 +2827,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
if (size_in_bytes == 0)
tmp |= PACKET3_CP_DMA_CP_SYNC;
radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
- radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(src_offset));
radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(dst_offset));
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
radeon_ring_write(ring, cur_size_in_bytes);
src_offset += cur_size_in_bytes;
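Several hunks in this patch swap open-coded addr & 0xffffffff for lower_32_bits(addr); the two are equivalent, the helper just names the intent. A sketch mirroring the kernel's definitions of the pair (the double 16-bit shift in upper_32_bits avoids undefined behaviour when the argument is only 32 bits wide):

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
        uint64_t addr = 0x000000ab12345678ULL; /* hypothetical fence addr */

        printf("lo=0x%08x hi=0x%02x\n",
               lower_32_bits(addr),
               upper_32_bits(addr) & 0xff); /* ring packet keeps 8 bits */
        return 0;
}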
@@ -3876,7 +3879,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -3902,7 +3905,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 85a2bb28aed2..26ef8ced6f89 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,7 +133,7 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
/*
* update the N and CTS parameters for a given pixel clock rate
*/
-static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -142,21 +142,33 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
- WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
- WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
-
- WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
- WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
-
- WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
- WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
+ WREG32_P(HDMI0_ACR_32_0 + offset,
+ HDMI0_ACR_CTS_32(acr.cts_32khz),
+ ~HDMI0_ACR_CTS_32_MASK);
+ WREG32_P(HDMI0_ACR_32_1 + offset,
+ HDMI0_ACR_N_32(acr.n_32khz),
+ ~HDMI0_ACR_N_32_MASK);
+
+ WREG32_P(HDMI0_ACR_44_0 + offset,
+ HDMI0_ACR_CTS_44(acr.cts_44_1khz),
+ ~HDMI0_ACR_CTS_44_MASK);
+ WREG32_P(HDMI0_ACR_44_1 + offset,
+ HDMI0_ACR_N_44(acr.n_44_1khz),
+ ~HDMI0_ACR_N_44_MASK);
+
+ WREG32_P(HDMI0_ACR_48_0 + offset,
+ HDMI0_ACR_CTS_48(acr.cts_48khz),
+ ~HDMI0_ACR_CTS_48_MASK);
+ WREG32_P(HDMI0_ACR_48_1 + offset,
+ HDMI0_ACR_N_48(acr.n_48khz),
+ ~HDMI0_ACR_N_48_MASK);
}
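What changed semantically above: WREG32() stores the whole register, while WREG32_P() is a masked read-modify-write, so only the named field moves and neighbouring fields survive. A self-contained sketch of those semantics, modelled on the radeon helper; the fake MMIO array is illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg_file[1]; /* stand-in for real MMIO space */
static uint32_t RREG32(int reg) { return reg_file[reg]; }
static void WREG32(int reg, uint32_t v) { reg_file[reg] = v; }

/* 'mask' selects the bits to KEEP, so callers pass ~FIELD_MASK to
 * update just one field, as the ACR writes above do. */
static void WREG32_P(int reg, uint32_t val, uint32_t mask)
{
        uint32_t tmp = RREG32(reg);
        tmp &= mask;        /* keep everything the mask covers */
        tmp |= val & ~mask; /* install the new field value     */
        WREG32(reg, tmp);
}

#define CTS_MASK (0xfffffu << 12)

int main(void)
{
        reg_file[0] = 0x00000fff;                  /* pre-existing low bits */
        WREG32_P(0, 0x12345u << 12, ~CTS_MASK);    /* update only the field */
        printf("0x%08x\n", reg_file[0]);           /* prints 0x12345fff */
        return 0;
}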
/*
* build a HDMI Video Info Frame
*/
-static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
+void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
+ size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -231,7 +243,7 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
/*
* write the audio workaround status to the hardware
*/
-static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -250,7 +262,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
-static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -320,121 +332,6 @@ static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
}
}
-static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- u32 tmp;
- u8 *sadb;
- int sad_count;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- return;
- }
-
- /* program the speaker allocation */
- tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
- tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
- /* set HDMI mode */
- tmp |= HDMI_CONNECTION;
- if (sad_count)
- tmp |= SPEAKER_ALLOCATION(sadb[0]);
- else
- tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- return;
- }
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 value = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
-
- WREG32(eld_reg_to_type[i][0], value);
- }
-
- kfree(sads);
-}
-
/*
* update the info frames with the data from the current display mode
*/
@@ -447,6 +344,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
uint32_t offset;
+ uint32_t acr_ctl;
ssize_t err;
if (!dig || !dig->afmt)
@@ -463,52 +361,44 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
r600_audio_set_dto(encoder, mode->clock);
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND); /* send null packets when required */
-
- WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
-
- if (ASIC_IS_DCE32(rdev)) {
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
- AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
- } else {
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */
- HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
- }
-
- if (ASIC_IS_DCE32(rdev)) {
- dce3_2_afmt_write_speaker_allocation(encoder);
- dce3_2_afmt_write_sad_regs(encoder);
- }
-
- WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
- HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
- HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
-
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND | /* send null packets when required */
- HDMI0_GC_SEND | /* send general control packets */
- HDMI0_GC_CONT); /* send general control packets every frame */
-
- /* TODO: HDMI0_AUDIO_INFO_UPDATE */
- WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
- HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
-
- WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
- HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
+ ~(HDMI0_AUDIO_SAMPLE_SEND |
+ HDMI0_AUDIO_DELAY_EN_MASK |
+ HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
+ HDMI0_60958_CS_UPDATE));
+
+ /* DCE 3.0 uses a register that's normally for CRC_CONTROL */
+ acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
+ HDMI0_ACR_PACKET_CONTROL;
+ WREG32_P(acr_ctl + offset,
+ HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+ HDMI0_ACR_AUTO_SEND, /* allow hw to send ACR packets when required */
+ ~(HDMI0_ACR_SOURCE |
+ HDMI0_ACR_AUTO_SEND));
+
+ WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
+
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+ WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
+ ~(HDMI0_AVI_INFO_LINE_MASK |
+ HDMI0_AUDIO_INFO_LINE_MASK));
+
+ WREG32_AND(HDMI0_GC + offset,
+ ~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -523,22 +413,45 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
}
r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+ /* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */
+
+ WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
+ ~(HDMI0_GENERIC0_SEND |
+ HDMI0_GENERIC0_CONT |
+ HDMI0_GENERIC0_UPDATE |
+ HDMI0_GENERIC1_SEND |
+ HDMI0_GENERIC1_CONT |
+ HDMI0_GENERIC0_LINE_MASK |
+ HDMI0_GENERIC1_LINE_MASK));
+
r600_hdmi_update_ACR(encoder, mode->clock);
+ WREG32_P(HDMI0_60958_0 + offset,
+ HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
+ ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
+ HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
+
+ WREG32_P(HDMI0_60958_1 + offset,
+ HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
+ ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
+
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
- r600_hdmi_audio_workaround(encoder);
-
/* enable audio after setting up hw */
r600_audio_enable(rdev, dig->afmt->pin, true);
}
-/*
- * update settings with current parameters from audio engine
+/**
+ * r600_hdmi_update_audio_settings - Update audio infoframe
+ *
+ * @encoder: drm encoder
+ *
+ * Gets info about the current audio stream and updates the audio infoframe.
*/
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
@@ -550,7 +463,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
uint32_t offset;
- uint32_t iec;
+ uint32_t value;
ssize_t err;
if (!dig->afmt || !dig->afmt->enabled)
@@ -563,60 +476,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)audio.status_bits, (int)audio.category_code);
- iec = 0;
- if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
- iec |= 1 << 0;
- if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
- iec |= 1 << 1;
- if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
- iec |= 1 << 2;
- if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
- iec |= 1 << 3;
-
- iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
-
- switch (audio.rate) {
- case 32000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
- break;
- case 44100:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
- break;
- case 48000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
- break;
- case 88200:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
- break;
- case 96000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
- break;
- case 176400:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
- break;
- case 192000:
- iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
- break;
- }
-
- WREG32(HDMI0_60958_0 + offset, iec);
-
- iec = 0;
- switch (audio.bits_per_sample) {
- case 16:
- iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
- break;
- case 20:
- iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
- break;
- case 24:
- iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
- break;
- }
- if (audio.status_bits & AUDIO_STATUS_V)
- iec |= 0x5 << 16;
- WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
-
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
DRM_ERROR("failed to setup audio infoframe\n");
@@ -631,8 +490,22 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
return;
}
+ value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
+ if (value & HDMI0_AUDIO_TEST_EN)
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value & ~HDMI0_AUDIO_TEST_EN);
+
+ WREG32_OR(HDMI0_CONTROL + offset,
+ HDMI0_ERROR_ACK);
+
+ WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
+ ~HDMI0_AUDIO_INFO_SOURCE);
+
r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
- r600_hdmi_audio_workaround(encoder);
+
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AUDIO_INFO_CONT |
+ HDMI0_AUDIO_INFO_UPDATE);
}
/*
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 37455f65107f..f94e7a9afe75 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1029,15 +1029,18 @@
#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4)
# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
# define HDMI0_AUDIO_TEST_EN (1 << 12)
# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+# define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16)
# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
# define HDMI0_60958_CS_UPDATE (1 << 26)
# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
#define HDMI0_AUDIO_CRC_CONTROL 0x740c
# define HDMI0_AUDIO_CRC_EN (1 << 0)
+#define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c
#define HDMI0_VBI_PACKET_CONTROL 0x7410
# define HDMI0_NULL_SEND (1 << 0)
# define HDMI0_GC_SEND (1 << 4)
@@ -1054,7 +1057,9 @@
# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
#define HDMI0_INFOFRAME_CONTROL1 0x7418
# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8)
# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
# define HDMI0_GENERIC0_SEND (1 << 0)
@@ -1063,7 +1068,9 @@
# define HDMI0_GENERIC1_SEND (1 << 4)
# define HDMI0_GENERIC1_CONT (1 << 5)
# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI0_GENERIC0_LINE_MASK (0x3f << 16)
# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+# define HDMI0_GENERIC1_LINE_MASK (0x3f << 24)
#define HDMI0_GC 0x7428
# define HDMI0_GC_AVMUTE (1 << 0)
#define HDMI0_AVI_INFO0 0x7454
@@ -1119,16 +1126,22 @@
#define HDMI0_GENERIC1_6 0x74a8
#define HDMI0_ACR_32_0 0x74ac
# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+# define HDMI0_ACR_CTS_32_MASK (0xfffff << 12)
#define HDMI0_ACR_32_1 0x74b0
# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
+# define HDMI0_ACR_N_32_MASK (0xfffff << 0)
#define HDMI0_ACR_44_0 0x74b4
# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+# define HDMI0_ACR_CTS_44_MASK (0xfffff << 12)
#define HDMI0_ACR_44_1 0x74b8
# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
+# define HDMI0_ACR_N_44_MASK (0xfffff << 0)
#define HDMI0_ACR_48_0 0x74bc
# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+# define HDMI0_ACR_CTS_48_MASK (0xfffff << 12)
#define HDMI0_ACR_48_1 0x74c0
# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
+# define HDMI0_ACR_N_48_MASK (0xfffff << 0)
#define HDMI0_ACR_STATUS_0 0x74c4
#define HDMI0_ACR_STATUS_1 0x74c8
#define HDMI0_AUDIO_INFO0 0x74cc
@@ -1148,14 +1161,17 @@
# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20)
# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+# define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28)
#define HDMI0_60958_1 0x74d8
# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20)
#define HDMI0_ACR_PACKET_CONTROL 0x74dc
# define HDMI0_ACR_SEND (1 << 0)
# define HDMI0_ACR_CONT (1 << 1)
@@ -1166,6 +1182,7 @@
# define HDMI0_ACR_48 3
# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI0_ACR_AUTO_SEND (1 << 12)
+#define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc
#define HDMI0_RAMP_CONTROL0 0x74e0
# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL1 0x74e4
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8149e7cf4303..29d9cc04c04e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -100,6 +100,9 @@ extern int radeon_dpm;
extern int radeon_aspm;
extern int radeon_runtime_pm;
extern int radeon_hard_reset;
+extern int radeon_vm_size;
+extern int radeon_vm_block_size;
+extern int radeon_deep_color;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -676,14 +679,16 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
* IRQS.
*/
-struct radeon_unpin_work {
- struct work_struct work;
- struct radeon_device *rdev;
- int crtc_id;
- struct radeon_fence *fence;
+struct radeon_flip_work {
+ struct work_struct flip_work;
+ struct work_struct unpin_work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct drm_framebuffer *fb;
struct drm_pending_vblank_event *event;
- struct radeon_bo *old_rbo;
- u64 new_crtc_base;
+ struct radeon_bo *old_rbo;
+ struct radeon_bo *new_rbo;
+ struct radeon_fence *fence;
};
struct r500_irq_stat_regs {
@@ -745,10 +750,6 @@ union radeon_irq_stat_regs {
struct cik_irq_stat_regs cik;
};
-#define RADEON_MAX_HPD_PINS 7
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 7
-
struct radeon_irq {
bool installed;
spinlock_t lock;
@@ -835,13 +836,8 @@ struct radeon_mec {
/* maximum number of VMIDs */
#define RADEON_NUM_VM 16
-/* defines number of bits in page table versus page directory,
- * a page is 4KB so we have 12 bits offset, 9 bits in the page
- * table and the remaining 19 bits are in the page directory */
-#define RADEON_VM_BLOCK_SIZE 9
-
/* number of entries in page table */
-#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
+#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define RADEON_VM_PTB_ALIGN_SIZE 32768
@@ -854,6 +850,15 @@ struct radeon_mec {
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
+/* PTE (Page Table Entry) fragment field for different page sizes */
+#define R600_PTE_FRAG_4KB (0 << 7)
+#define R600_PTE_FRAG_64KB (4 << 7)
+#define R600_PTE_FRAG_256KB (6 << 7)
+
+/* flags used for GART page table entries on R600+ */
+#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
+ | R600_PTE_READABLE | R600_PTE_WRITEABLE)
+
struct radeon_vm_pt {
struct radeon_bo *bo;
uint64_t addr;
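With the fixed RADEON_VM_BLOCK_SIZE gone, the geometry now follows the two module parameters: radeon_vm_size (in MB) sets the page-frame count, and radeon_vm_block_size decides how many of those index bits stay inside one page table, the rest indexing the page directory. A sketch of the resulting arithmetic, with the defaults assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned radeon_vm_size = 4096;    /* MB, assumed default */
        unsigned radeon_vm_block_size = 9; /* bits, assumed default */

        /* 4KB pages: 1 MB = 256 pages, hence the << 8 */
        uint64_t max_pfn    = (uint64_t)radeon_vm_size << 8;
        uint64_t pte_count  = 1ull << radeon_vm_block_size; /* per table */
        uint64_t pd_entries = max_pfn >> radeon_vm_block_size;

        printf("pfns=%llu ptes/table=%llu pd entries=%llu\n",
               (unsigned long long)max_pfn,
               (unsigned long long)pte_count,
               (unsigned long long)pd_entries); /* 1048576, 512, 2048 */
        return 0;
}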
@@ -986,8 +991,8 @@ struct radeon_cs_reloc {
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
- unsigned domain;
- unsigned alt_domain;
+ unsigned prefered_domains;
+ unsigned allowed_domains;
uint32_t tiling_flags;
uint32_t handle;
};
@@ -1771,7 +1776,8 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
- int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+ void (*set_page)(struct radeon_device *rdev, unsigned i,
+ uint64_t addr);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -1883,9 +1889,8 @@ struct radeon_asic {
} dpm;
/* pageflipping */
struct {
- void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
- u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
- void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+ void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
} pflip;
};
@@ -1924,6 +1929,7 @@ struct r600_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
+ unsigned active_simds;
};
struct rv770_asic {
@@ -1949,6 +1955,7 @@ struct rv770_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
+ unsigned active_simds;
};
struct evergreen_asic {
@@ -1975,6 +1982,7 @@ struct evergreen_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
+ unsigned active_simds;
};
struct cayman_asic {
@@ -2013,6 +2021,7 @@ struct cayman_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
+ unsigned active_simds;
};
struct si_asic {
@@ -2043,6 +2052,7 @@ struct si_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
+ uint32_t active_cus;
};
struct cik_asic {
@@ -2074,6 +2084,7 @@ struct cik_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
+ uint32_t active_cus;
};
union radeon_asic_config {
@@ -2745,9 +2756,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 42433344cb1b..a9297b2c3524 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -117,9 +117,6 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
PCI_VENDOR_ID_SONY, 0x8175, 1},
- /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
- { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
- PCI_VENDOR_ID_ATI, 0x0152, 2},
{ 0, 0, 0, 0, 0, 0, 0 },
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e5f0177bea1e..34b9aa9e3c06 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -248,9 +248,8 @@ static struct radeon_asic r100_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -315,9 +314,8 @@ static struct radeon_asic r200_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -396,9 +394,8 @@ static struct radeon_asic r300_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -463,9 +460,8 @@ static struct radeon_asic r300_asic_pcie = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -530,9 +526,8 @@ static struct radeon_asic r420_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -597,9 +592,8 @@ static struct radeon_asic rs400_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .page_flip_pending = &r100_page_flip_pending,
},
};
@@ -666,9 +660,8 @@ static struct radeon_asic rs600_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -735,9 +728,8 @@ static struct radeon_asic rs690_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -802,9 +794,8 @@ static struct radeon_asic rv515_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -869,9 +860,8 @@ static struct radeon_asic r520_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -968,9 +958,8 @@ static struct radeon_asic r600_asic = {
.get_temperature = &rv6xx_get_temp,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -1059,9 +1048,8 @@ static struct radeon_asic rv6xx_asic = {
.force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -1150,9 +1138,8 @@ static struct radeon_asic rs780_asic = {
.force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rs600_page_flip_pending,
},
};
@@ -1201,7 +1188,7 @@ static struct radeon_asic rv770_asic = {
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
- .hdmi_setmode = &r600_hdmi_setmode,
+ .hdmi_setmode = &dce3_1_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1256,9 +1243,8 @@ static struct radeon_asic rv770_asic = {
.vblank_too_short = &rv770_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rv770_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .page_flip_pending = &rv770_page_flip_pending,
},
};
@@ -1375,9 +1361,8 @@ static struct radeon_asic evergreen_asic = {
.vblank_too_short = &cypress_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1467,9 +1452,8 @@ static struct radeon_asic sumo_asic = {
.force_performance_level = &sumo_dpm_force_performance_level,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1560,9 +1544,8 @@ static struct radeon_asic btc_asic = {
.vblank_too_short = &btc_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1704,9 +1687,8 @@ static struct radeon_asic cayman_asic = {
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1805,9 +1787,8 @@ static struct radeon_asic trinity_asic = {
.enable_bapm = &trinity_dpm_enable_bapm,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -1936,9 +1917,8 @@ static struct radeon_asic si_asic = {
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -2049,8 +2029,8 @@ static struct radeon_asic ci_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &cik_copy_cpdma,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &cik_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -2099,9 +2079,8 @@ static struct radeon_asic ci_asic = {
.powergate_uvd = &ci_dpm_powergate_uvd,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
@@ -2204,9 +2183,8 @@ static struct radeon_asic kv_asic = {
.enable_bapm = &kv_dpm_enable_bapm,
},
.pflip = {
- .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .page_flip_pending = &evergreen_page_flip_pending,
},
};
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d55a3a39e82..01e7c0ad8f01 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,7 +67,8 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
-int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -135,9 +136,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
-extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
-extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void r100_page_flip(struct radeon_device *rdev, int crtc,
+ u64 crtc_base);
+extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
@@ -171,7 +172,8 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -206,7 +208,8 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
-int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -229,7 +232,8 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
-int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -241,9 +245,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
-extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
-extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
+ u64 crtc_base);
+extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -387,6 +391,11 @@ void r600_rlc_stop(struct radeon_device *rdev);
int r600_audio_init(struct radeon_device *rdev);
struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
+void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
+ size_t size);
+void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
@@ -447,7 +456,8 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev);
-u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
@@ -458,6 +468,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
+/* hdmi */
+void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* rv7xx pm */
int rv770_dpm_init(struct radeon_device *rdev);
int rv770_dpm_enable(struct radeon_device *rdev);
@@ -513,9 +525,9 @@ extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void btc_pm_init_profile(struct radeon_device *rdev);
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
-extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
+ u64 crtc_base);
+extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 30844814c25a..173f378428a9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0) {
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
+ rdev->clock.default_dispclk = 60000; /* 600 MHz */
+ else if (ASIC_IS_DCE5(rdev))
rdev->clock.default_dispclk = 54000; /* 540 Mhz */
else
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
}
+ /* set a reasonable default for DP */
+ if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
+ DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+ rdev->clock.default_dispclk / 100);
+ rdev->clock.default_dispclk = 60000;
+ }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
rdev->clock.current_dispclk = rdev->clock.default_dispclk;
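Clock values in this table are in units of 10 kHz, hence 60000 for 600 MHz and the division by 100 when logging MHz. A tiny sketch of the DP floor check added above, with a hypothetical firmware value:

#include <stdio.h>

int main(void)
{
        int default_dispclk = 50000; /* hypothetical: 500 MHz, in 10 kHz units */

        if (default_dispclk < 53900) { /* DCE6 DP floor from the patch */
                printf("Changing default dispclk from %d MHz to 600 MHz\n",
                       default_dispclk / 100);
                default_dispclk = 60000;
        }
        printf("dispclk = %d MHz\n", default_dispclk / 100); /* 600 MHz */
        return 0;
}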
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 9ab30976287d..6a03624fadaa 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -626,7 +626,7 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
vhdr->DeviceID != rdev->pdev->device) {
DRM_INFO("ACPI VFCT table is not for this card\n");
goto out_unmap;
- };
+ }
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ea50e0ae7bf7..44831197e82e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -48,6 +48,7 @@ void radeon_connector_hotplug(struct drm_connector *connector)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
/* if the connector is already off, don't turn it back on */
+ /* FIXME: This access isn't protected by any locks. */
if (connector->dpms != DRM_MODE_DPMS_ON)
return;
@@ -100,6 +101,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int bpc = 8;
+ int mode_clock, max_tmds_clock;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
@@ -145,6 +147,64 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
}
break;
}
+
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* hdmi deep color only implemented on DCE4+ */
+ if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
+ DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
+ connector->name, bpc);
+ bpc = 8;
+ }
+
+ /*
+ * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
+ * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
+ * 12 bpc is always supported on hdmi deep color sinks, as this is
+ * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
+ */
+ if (bpc > 12) {
+ DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
+ connector->name, bpc);
+ bpc = 12;
+ }
+
+ /* Any defined maximum tmds clock limit we must not exceed? */
+ if (connector->max_tmds_clock > 0) {
+ /* mode_clock is the clock in kHz for the mode to be set on this connector */
+ mode_clock = radeon_connector->pixelclock_for_modeset;
+
+ /* Maximum allowable input clock in kHz */
+ max_tmds_clock = connector->max_tmds_clock * 1000;
+
+ DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
+ connector->name, mode_clock, max_tmds_clock);
+
+ /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
+ if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
+ if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+ (mode_clock * 5/4 <= max_tmds_clock))
+ bpc = 10;
+ else
+ bpc = 8;
+
+ DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
+ connector->name, bpc);
+ }
+
+ if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
+ bpc = 8;
+ DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
+ connector->name, bpc);
+ }
+ }
+ }
+
+ if ((radeon_deep_color == 0) && (bpc > 8))
+ bpc = 8;
+
+ DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
+ connector->name, connector->display_info.bpc, bpc);
+
return bpc;
}
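The 3/2 and 5/4 factors above are just bpc/8: a deep-color TMDS link carries 12 or 10 bits per component instead of 8, so the required TMDS clock scales accordingly. The same graceful-degradation logic in isolation, with a plain boolean standing in for the DRM_EDID_HDMI_DC_30 capability check:

#include <stdio.h>

static int pick_bpc(int bpc, int mode_clock, int max_tmds_clock,
                    int sink_supports_30bpp)
{
        if (bpc == 12 && mode_clock * 3 / 2 > max_tmds_clock)
                bpc = (sink_supports_30bpp &&
                       mode_clock * 5 / 4 <= max_tmds_clock) ? 10 : 8;
        if (bpc == 10 && mode_clock * 5 / 4 > max_tmds_clock)
                bpc = 8;
        return bpc;
}

int main(void)
{
        /* hypothetical: 1080p60 (148.5 MHz) into a 200 MHz TMDS limit */
        printf("%d bpc\n", pick_bpc(12, 148500, 200000, 1)); /* 10 */
        return 0;
}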
@@ -260,13 +320,17 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
continue;
if (priority == true) {
- DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
- DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
+ conflict->name);
+ DRM_DEBUG_KMS("in favor of %s\n",
+ connector->name);
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
- DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
- DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
+ connector->name);
+ DRM_DEBUG_KMS("in favor of %s\n",
+ conflict->name);
current_status = connector_status_disconnected;
}
break;
@@ -787,7 +851,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
- drm_get_connector_name(connector));
+ connector->name);
ret = connector_status_connected;
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1010,12 +1074,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
- drm_get_connector_name(connector));
+ connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* return a block of 0's. If we see this just stop polling on this output */
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
- DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+ DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
+ connector->name);
radeon_connector->ddc_bus = NULL;
} else {
ret = connector_status_connected;
@@ -1226,17 +1291,15 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
- if (ASIC_IS_DCE6(rdev)) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else
+ else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* HDMI 1.3+ supports max clock of 340 Mhz */
+ if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
- } else
+ else
+ return MODE_OK;
+ } else {
return MODE_CLOCK_HIGH;
+ }
}
/* check against the max pixel clock */
@@ -1387,7 +1450,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE5(rdev) &&
- (rdev->clock.dp_extclk >= 53900) &&
+ (rdev->clock.default_dispclk >= 53900) &&
radeon_connector_encoder_is_hbr2(connector)) {
return true;
}
@@ -1487,6 +1550,8 @@ out:
static int radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
@@ -1517,14 +1582,23 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
}
- return MODE_OK;
} else {
if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
- else
- return MODE_OK;
+ } else {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* HDMI 1.3+ supports max clock of 340 Mhz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ } else {
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ }
+ }
}
+
+ return MODE_OK;
}
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
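The limits used in both mode_valid paths above: roughly 165 MHz for a single TMDS link, 340 MHz once the hardware speaks HDMI 1.3+ (DCE6 here). A hedged sketch of that check by itself; the enum values are local stand-ins for the DRM ones:

#include <stdio.h>

enum mode_status { MODE_OK, MODE_CLOCK_HIGH };

static enum mode_status hdmi_clock_valid(int clock_khz, int is_dce6_hdmi)
{
        int limit = is_dce6_hdmi ? 340000 : 165000;
        return clock_khz > limit ? MODE_CLOCK_HIGH : MODE_OK;
}

int main(void)
{
        /* 4K@30 needs ~297 MHz: fine on DCE6 HDMI, too fast otherwise */
        printf("%d %d\n", hdmi_clock_valid(297000, 1),
               hdmi_clock_valid(297000, 0)); /* prints 0 1 */
        return 0;
}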
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41ecf8a60611..71a143461478 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -140,10 +140,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
/* TODO: is this still needed for NI+ ? */
- p->relocs[i].domain =
+ p->relocs[i].prefered_domains =
RADEON_GEM_DOMAIN_VRAM;
- p->relocs[i].alt_domain =
+ p->relocs[i].allowed_domains =
RADEON_GEM_DOMAIN_VRAM;
/* prioritize this over any other relocation */
@@ -158,10 +158,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -EINVAL;
}
- p->relocs[i].domain = domain;
+ p->relocs[i].prefered_domains = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
- p->relocs[i].alt_domain = domain;
+ p->relocs[i].allowed_domains = domain;
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2cd144c378d6..03686fab842d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1052,6 +1052,43 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_agpmode = 0;
break;
}
+
+ if (!radeon_check_pot_argument(radeon_vm_size)) {
+ dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
+ radeon_vm_size);
+ radeon_vm_size = 4096;
+ }
+
+ if (radeon_vm_size < 4) {
+ dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+ radeon_vm_size);
+ radeon_vm_size = 4096;
+ }
+
+ /*
+ * Max GPUVM size for Cayman, SI and CI are 40 bits.
+ */
+ if (radeon_vm_size > 1024*1024) {
+ dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+ radeon_vm_size);
+ radeon_vm_size = 4096;
+ }
+
+ /* defines the number of bits in the page table versus the page directory;
+ * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
+ * page table, and the remaining bits in the page directory */
+ if (radeon_vm_block_size < 9) {
+ dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+ radeon_vm_block_size);
+ radeon_vm_block_size = 9;
+ }
+
+ if (radeon_vm_block_size > 24 ||
+ radeon_vm_size < (1ull << radeon_vm_block_size)) {
+ dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+ radeon_vm_block_size);
+ radeon_vm_block_size = 9;
+ }
}
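(Note: radeon_check_pot_argument() is defined outside this hunk; a minimal sketch of such a helper, assuming the usual single-bit test:)

	static bool radeon_check_pot_argument(int arg)
	{
		/* a power of two has exactly one bit set, so clearing
		 * the lowest set bit must leave zero */
		return (arg & (arg - 1)) == 0;
	}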
/**
@@ -1126,12 +1163,13 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- bool can_switch;
- spin_lock(&dev->count_lock);
- can_switch = (dev->open_count == 0);
- spin_unlock(&dev->count_lock);
- return can_switch;
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
@@ -1196,17 +1234,16 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
+ radeon_check_arguments(rdev);
/* Adjust VM size here.
- * Currently set to 4GB ((1 << 20) 4k pages).
- * Max GPUVM size for cayman and SI is 40 bits.
+ * Max GPUVM size for cayman+ is 40 bits.
*/
- rdev->vm_manager.max_pfn = 1 << 20;
+ rdev->vm_manager.max_pfn = radeon_vm_size << 8;
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
return r;
- radeon_check_arguments(rdev);
/* all of the newer IGP chips have an internal gart
* However some rs4xx report as AGP, so remove that here.
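(Note on the new max_pfn computation: radeon_vm_size is in MiB while max_pfn counts 4KB GPU pages, hence the shift:)

	/* max_pfn = vm_size * (1 MiB / 4 KiB) = vm_size << 8;
	 * the default vm_size of 4096 gives 1 << 20 pages, i.e. the old 4GB */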
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 356b733caafe..13896edcf0b6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -66,7 +66,8 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
(radeon_crtc->lut_b[i] << 0));
}
- WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+ /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
+ WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
@@ -249,16 +250,21 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
drm_crtc_cleanup(crtc);
+ destroy_workqueue(radeon_crtc->flip_queue);
kfree(radeon_crtc);
}
-/*
- * Handle unpin events outside the interrupt handler proper.
+/**
+ * radeon_unpin_work_func - unpin old buffer object
+ *
+ * @__work: kernel work item
+ *
+ * Unpin the old frame buffer object outside of the interrupt handler
*/
static void radeon_unpin_work_func(struct work_struct *__work)
{
- struct radeon_unpin_work *work =
- container_of(__work, struct radeon_unpin_work, work);
+ struct radeon_flip_work *work =
+ container_of(__work, struct radeon_flip_work, unpin_work);
int r;
/* unpin of the old buffer */
@@ -276,10 +282,9 @@ static void radeon_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
- struct radeon_unpin_work *work;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
@@ -289,24 +294,16 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
return;
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
- work = radeon_crtc->unpin_work;
- if (work == NULL ||
- (work->fence && !radeon_fence_signaled(work->fence))) {
+ if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+ "RADEON_FLIP_SUBMITTED(%d)\n",
+ radeon_crtc->flip_status,
+ RADEON_FLIP_SUBMITTED);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
- /* New pageflip, or just completion of a previous one? */
- if (!radeon_crtc->deferred_flip_completion) {
- /* do the flip (mmio) */
- update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
- } else {
- /* This is just a completion of a flip queued in crtc
- * at last invocation. Make sure we go directly to
- * completion routine.
- */
- update_pending = 0;
- radeon_crtc->deferred_flip_completion = 0;
- }
+
+ update_pending = radeon_page_flip_pending(rdev, crtc_id);
/* Has the pageflip already completed in crtc, or is it certain
* to complete in this vblank?
@@ -324,19 +321,43 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
*/
update_pending = 0;
}
- if (update_pending) {
- /* crtc didn't flip in this target vblank interval,
- * but flip is pending in crtc. It will complete it
- * in next vblank interval, so complete the flip at
- * next vblank irq.
- */
- radeon_crtc->deferred_flip_completion = 1;
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ if (!update_pending)
+ radeon_crtc_handle_flip(rdev, crtc_id);
+}
+
+/**
+ * radeon_crtc_handle_flip - page flip completed
+ *
+ * @rdev: radeon device pointer
+ * @crtc_id: crtc number this event is for
+ *
+ * Called when we are sure that a page flip for this crtc is completed.
+ */
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_flip_work *work;
+ unsigned long flags;
+
+ /* this can happen at init */
+ if (radeon_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->flip_work;
+ if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+ "RADEON_FLIP_SUBMITTED(%d)\n",
+ radeon_crtc->flip_status,
+ RADEON_FLIP_SUBMITTED);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
- /* Pageflip (will be) certainly completed in this vblank. Clean up. */
- radeon_crtc->unpin_work = NULL;
+ /* Pageflip completed. Clean up. */
+ radeon_crtc->flip_status = RADEON_FLIP_NONE;
+ radeon_crtc->flip_work = NULL;
/* wakeup userspace */
if (work->event)
@@ -346,84 +367,70 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
radeon_fence_unref(&work->fence);
- radeon_post_page_flip(work->rdev, work->crtc_id);
- schedule_work(&work->work);
+ radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
+ queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+/**
+ * radeon_flip_work_func - page flip framebuffer
+ *
+ * @__work: kernel work item
+ *
+ * Wait for the buffer object to become idle and do the actual page flip
+ */
+static void radeon_flip_work_func(struct work_struct *__work)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_bo *rbo;
- struct radeon_unpin_work *work;
- unsigned long flags;
- u32 tiling_flags, pitch_pixels;
- u64 base;
- int r;
+ struct radeon_flip_work *work =
+ container_of(__work, struct radeon_flip_work, flip_work);
+ struct radeon_device *rdev = work->rdev;
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
+ struct drm_crtc *crtc = &radeon_crtc->base;
+ struct drm_framebuffer *fb = work->fb;
- work->event = event;
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- new_radeon_fb = to_radeon_framebuffer(fb);
- /* schedule unpin of the old buffer */
- obj = old_radeon_fb->obj;
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- rbo = gem_to_radeon_bo(obj);
- work->old_rbo = rbo;
- obj = new_radeon_fb->obj;
- rbo = gem_to_radeon_bo(obj);
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
- spin_lock(&rbo->tbo.bdev->fence_lock);
- if (rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
- spin_unlock(&rbo->tbo.bdev->fence_lock);
+ unsigned long flags;
+ int r;
- INIT_WORK(&work->work, radeon_unpin_work_func);
+ down_read(&rdev->exclusive_lock);
+ while (work->fence) {
+ r = radeon_fence_wait(work->fence, false);
+ if (r == -EDEADLK) {
+ up_read(&rdev->exclusive_lock);
+ r = radeon_gpu_reset(rdev);
+ down_read(&rdev->exclusive_lock);
+ }
- /* We borrow the event spin lock for protecting unpin_work */
- spin_lock_irqsave(&dev->event_lock, flags);
- if (radeon_crtc->unpin_work) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- r = -EBUSY;
- goto unlock_free;
+ if (r) {
+ DRM_ERROR("failed to wait on page flip fence (%d)!\n",
+ r);
+ goto cleanup;
+ } else
+ radeon_fence_unref(&work->fence);
}
- radeon_crtc->unpin_work = work;
- radeon_crtc->deferred_flip_completion = 0;
- spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
- work->old_rbo, rbo);
+ work->old_rbo, work->new_rbo);
- r = radeon_bo_reserve(rbo, false);
+ r = radeon_bo_reserve(work->new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
- goto pflip_cleanup;
+ goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
+ radeon_bo_unreserve(work->new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto pflip_cleanup;
+ goto cleanup;
}
- radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(rbo);
+ radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(work->new_rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -461,44 +468,109 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
base &= ~7;
}
- spin_lock_irqsave(&dev->event_lock, flags);
- work->new_crtc_base = base;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- /* update crtc fb */
- crtc->primary->fb = fb;
-
- r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup1;
+ goto pflip_cleanup;
}
+ /* We borrow the event spin lock for protecting flip_work */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
/* set the proper interrupt */
- radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
- return 0;
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
+
+ return;
-pflip_cleanup1:
- if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
+pflip_cleanup:
+ if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
- goto pflip_cleanup;
+ goto cleanup;
}
- if (unlikely(radeon_bo_unpin(rbo) != 0)) {
+ if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
- radeon_bo_unreserve(rbo);
+ radeon_bo_unreserve(work->new_rbo);
-pflip_cleanup:
- spin_lock_irqsave(&dev->event_lock, flags);
- radeon_crtc->unpin_work = NULL;
-unlock_free:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
+cleanup:
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
+ up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ unsigned long flags;
- return r;
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->fb = fb;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ work->new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
+ if (work->new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
+ spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
+
+ /* We borrow the event spin lock for protecting flip_work */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ radeon_fence_unref(&work->fence);
+ kfree(work);
+ return -EBUSY;
+ }
+ radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+ radeon_crtc->flip_work = work;
+
+ /* update crtc fb */
+ crtc->primary->fb = fb;
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ queue_work(radeon_crtc->flip_queue, &work->flip_work);
+
+ return 0;
}
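(Note: the rework above replaces the old deferred_flip_completion flag with the explicit flip_status state machine added to radeon_mode.h further down in this diff; a sketch of the lifecycle:)

	/* RADEON_FLIP_NONE      - idle; the page_flip ioctl may queue flip_work
	 * RADEON_FLIP_PENDING   - flip_work queued, waiting on the BO fence
	 * RADEON_FLIP_SUBMITTED - flip programmed via mmio, waiting for vblank;
	 *   radeon_crtc_handle_vblank() then polls radeon_page_flip_pending()
	 *   and hands off to radeon_crtc_handle_flip(), which delivers the
	 *   event and queues unpin_work for the old buffer */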
static int
@@ -568,6 +640,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
+ radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) {
@@ -661,7 +734,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
- DRM_INFO(" %s\n", drm_get_connector_name(connector));
+ DRM_INFO(" %s\n", connector->name);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c00a2f585185..cb1421369e3a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,9 +81,10 @@
* 2.37.0 - allow GS ring setup on r6xx/r7xx
* 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
+ * 2.39.0 - Add INFO query for number of active CUs
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 38
+#define KMS_DRIVER_MINOR 39
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -172,6 +173,9 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
+int radeon_vm_size = 4096;
+int radeon_vm_block_size = 9;
+int radeon_deep_color = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -239,6 +243,15 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
+MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+module_param_named(vm_size, radeon_vm_size, int, 0444);
+
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
+
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, radeon_deep_color, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
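(Usage note, illustrative values only: e.g. "radeon.vm_size=8192 radeon.vm_block_size=10" on the kernel command line, or the same via modprobe, selects an 8GB per-VM address space with 10-bit page tables; both values must pass the checks added to radeon_check_arguments() above.)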
@@ -519,7 +532,6 @@ static struct drm_driver kms_driver = {
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER,
- .dev_priv_size = 0,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.preclose = radeon_driver_preclose_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index a77b1c13ea43..913787085dfa 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -819,15 +819,35 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
return 0;
}
+/**
+ * radeon_debugfs_gpu_reset - manually trigger a gpu reset
+ *
+ * Manually trigger a gpu reset at the next fence wait.
+ */
+static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ down_read(&rdev->exclusive_lock);
+ seq_printf(m, "%d\n", rdev->needs_reset);
+ rdev->needs_reset = true;
+ up_read(&rdev->exclusive_lock);
+
+ return 0;
+}
+
static struct drm_info_list radeon_debugfs_fence_list[] = {
{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+ {"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
return 0;
#endif
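(Usage note: reading the new entry, typically /sys/kernel/debug/dri/<minor>/radeon_gpu_reset, prints the current needs_reset value and then sets it, so the reset is carried out on the next fence wait; the file count passed to radeon_debugfs_add_files() is bumped to 2 to match the two-entry list.)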
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 7b944142a9fd..add622008407 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -94,6 +94,8 @@ static int pre_xfer(struct i2c_adapter *i2c_adap)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
+ mutex_lock(&i2c->mutex);
+
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety sake
@@ -170,6 +172,8 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
+
+ mutex_unlock(&i2c->mutex);
}
static int get_clock(void *i2c_priv)
@@ -813,6 +817,8 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_bus_rec *rec = &i2c->rec;
int ret = 0;
+ mutex_lock(&i2c->mutex);
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -879,6 +885,8 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
break;
}
+ mutex_unlock(&i2c->mutex);
+
return ret;
}
@@ -919,6 +927,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->adapter.dev.parent = &dev->pdev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
+ mutex_init(&i2c->mutex);
if (rec->mm_i2c ||
(rec->hw_capable &&
radeon_hw_i2c &&
@@ -979,7 +988,7 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
return;
i2c_del_adapter(&i2c->adapter);
if (i2c->has_aux)
- drm_dp_aux_unregister_i2c_bus(&i2c->aux);
+ drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index bdb0f93e73bc..0b98ea134579 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -399,7 +399,7 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 089c9ffb0aa9..16807afab362 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -287,7 +287,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
rdev->irq.installed = true;
- r = drm_irq_install(rdev->ddev);
+ r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
if (r) {
rdev->irq.installed = false;
flush_work(&rdev->hotplug_work);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index eaaedba04675..35d931881b4b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -513,6 +513,22 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
value_size = sizeof(uint64_t);
value64 = atomic64_read(&rdev->gtt_usage);
break;
+ case RADEON_INFO_ACTIVE_CU_COUNT:
+ if (rdev->family >= CHIP_BONAIRE)
+ *value = rdev->config.cik.active_cus;
+ else if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.active_cus;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.active_simds;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.active_simds;
+ else if (rdev->family >= CHIP_RV770)
+ *value = rdev->config.rv770.active_simds;
+ else if (rdev->family >= CHIP_R600)
+ *value = rdev->config.r600.active_simds;
+ else
+ *value = 1;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -859,4 +875,4 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
-int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
+int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
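(Note: a userspace sketch of querying the new INFO request through libdrm; the helper name is illustrative, and RADEON_INFO_ACTIVE_CU_COUNT is assumed to come from the matching radeon_drm.h update:)

	#include <string.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <radeon_drm.h>

	/* returns 0 on success, with the active CU/SIMD count in *count */
	static int radeon_query_active_cus(int fd, uint32_t *count)
	{
		struct drm_radeon_info info;

		memset(&info, 0, sizeof(info));
		info.request = RADEON_INFO_ACTIVE_CU_COUNT;
		info.value = (uintptr_t)count;	/* kernel writes the result here */

		return drmCommandWriteRead(fd, DRM_RADEON_INFO,
					   &info, sizeof(info));
	}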
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6ddf31a2d34e..0592ddb0904b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,6 +46,10 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+#define RADEON_MAX_HPD_PINS 7
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
+
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
@@ -191,6 +195,7 @@ struct radeon_i2c_chan {
struct radeon_i2c_bus_rec rec;
struct drm_dp_aux aux;
bool has_aux;
+ struct mutex mutex;
};
/* mostly for macs, but really any system without connector tables */
@@ -232,8 +237,8 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[6];
- struct radeon_afmt *afmt[7];
+ struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
+ struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -301,6 +306,12 @@ struct radeon_atom_ss {
uint16_t amount;
};
+enum radeon_flip_status {
+ RADEON_FLIP_NONE,
+ RADEON_FLIP_PENDING,
+ RADEON_FLIP_SUBMITTED
+};
+
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
@@ -324,8 +335,9 @@ struct radeon_crtc {
struct drm_display_mode native_mode;
int pll_id;
/* page flipping */
- struct radeon_unpin_work *unpin_work;
- int deferred_flip_completion;
+ struct workqueue_struct *flip_queue;
+ struct radeon_flip_work *flip_work;
+ enum radeon_flip_status flip_status;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
@@ -505,6 +517,7 @@ struct radeon_connector {
struct radeon_i2c_chan *router_bus;
enum radeon_connector_audio audio;
enum radeon_connector_dither dither;
+ int pixelclock_for_modeset;
};
struct radeon_framebuffer {
@@ -906,6 +919,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4faa4d6f9bb4..6c717b257d6d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,7 +446,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->robj;
if (!bo->pin_count) {
- u32 domain = lobj->domain;
+ u32 domain = lobj->prefered_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
* into account. We don't want to disallow buffer moves
* completely.
*/
- if ((lobj->alt_domain & current_domain) != 0 &&
+ if ((lobj->allowed_domains & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -476,8 +476,9 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
initial_bytes_moved;
if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
- domain = lobj->alt_domain;
+ if (r != -ERESTARTSYS &&
+ domain != lobj->allowed_domains) {
+ domain = lobj->allowed_domains;
goto retry;
}
ttm_eu_backoff_reservation(ticket, head);
@@ -730,7 +731,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0))
return r;
spin_lock(&bo->tbo.bdev->fence_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9e7b25a0629d..5a873f31a171 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -65,7 +65,7 @@ static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2bdae61c0ac0..e447e390d09a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
rdev->pm.dpm.ac_power = true;
else
rdev->pm.dpm.ac_power = false;
- if (rdev->asic->dpm.enable_bapm)
- radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ if (rdev->family == CHIP_ARUBA) {
+ if (rdev->asic->dpm.enable_bapm)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ }
mutex_unlock(&rdev->pm.mutex);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
@@ -984,6 +986,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
if (enable) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.uvd_active = true;
+ /* disable this for now */
+#if 0
if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -993,6 +997,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
else
+#endif
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
rdev->pm.dpm.state = dpm_state;
mutex_unlock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 956ab7f14e16..23bb64fd775f 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3054,7 +3054,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
value = 0;
else
- value = drm_dev_to_irq(dev);
+ value = dev->pdev->irq;
break;
case RADEON_PARAM_GART_BASE:
value = dev_priv->gart_vm_start;
@@ -3258,4 +3258,4 @@ struct drm_ioctl_desc radeon_ioctls[] = {
DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
};
-int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
+int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1b65ae2433cd..a4ad270e8261 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -812,7 +812,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
(rdev->pm.dpm.hd != hd)) {
rdev->pm.dpm.sd = sd;
rdev->pm.dpm.hd = hd;
- streams_changed = true;
+ /* disable this for now */
+ /*streams_changed = true;*/
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 3971d968af6c..aa21c31a846c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_HAWAII:
case CHIP_MULLINS:
fw_name = FIRMWARE_BONAIRE;
break;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index c11b71d249e3..eecff6bbd341 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -59,7 +59,7 @@
*/
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
- return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+ return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}
/**
@@ -140,8 +140,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
/* add the vm page table to the list */
list[0].gobj = NULL;
list[0].robj = vm->page_directory;
- list[0].domain = RADEON_GEM_DOMAIN_VRAM;
- list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+ list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tiling_flags = 0;
list[0].handle = 0;
@@ -153,8 +153,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].gobj = NULL;
list[idx].robj = vm->page_tables[i].bo;
- list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
- list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tiling_flags = 0;
list[idx].handle = 0;
@@ -474,8 +474,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
bo_va->valid = false;
list_move(&bo_va->vm_list, head);
- soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
- eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+ soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+ eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+
+ BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
if (eoffset > vm->max_pde_used)
vm->max_pde_used = eoffset;
@@ -493,7 +495,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
- RADEON_GPU_PAGE_SIZE, false,
+ RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
if (r)
return r;
@@ -583,10 +585,9 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
-
struct radeon_bo *pd = vm->page_directory;
uint64_t pd_addr = radeon_bo_gpu_offset(pd);
+ uint32_t incr = RADEON_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct radeon_ib ib;
@@ -660,6 +661,84 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
}
/**
+ * radeon_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB for the update
+ * @pe_start: first PTE to handle
+ * @pe_end: last PTE to handle
+ * @addr: addr those PTEs should point to
+ * @flags: hw mapping flags
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_frag_ptes(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe_start, uint64_t pe_end,
+ uint64_t addr, uint32_t flags)
+{
+ /*
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ */
+
+ /* NI is optimized for 256KB fragments, SI and newer for 64KB */
+ uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+ R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
+ uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+
+ uint64_t frag_start = ALIGN(pe_start, frag_align);
+ uint64_t frag_end = pe_end & ~(frag_align - 1);
+
+ unsigned count;
+
+ /* system pages are not physically contiguous */
+ if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
+ (frag_start >= frag_end)) {
+
+ count = (pe_end - pe_start) / 8;
+ radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ return;
+ }
+
+ /* handle the 4K area at the beginning */
+ if (pe_start != frag_start) {
+ count = (frag_start - pe_start) / 8;
+ radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ addr += RADEON_GPU_PAGE_SIZE * count;
+ }
+
+ /* handle the area in the middle */
+ count = (frag_end - frag_start) / 8;
+ radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+
+ /* handle the 4K area at the end */
+ if (frag_end != pe_end) {
+ addr += RADEON_GPU_PAGE_SIZE * count;
+ count = (pe_end - frag_end) / 8;
+ radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ }
+}
+
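(Note: a worked example of the split performed above, using the SI values of frag_align = 0x80 / 64KB fragments:)

	/* pe_start = 0x1050, pe_end = 0x2010:
	 *   frag_start = ALIGN(0x1050, 0x80) = 0x1080
	 *   frag_end   = 0x2010 & ~0x7f      = 0x2000
	 * so [0x1050,0x1080) and [0x2000,0x2010) are written as plain 4KB
	 * PTEs, while [0x1080,0x2000) is written with flags | frag_flags */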
+/**
* radeon_vm_update_ptes - make sure that page tables are valid
*
* @rdev: radeon_device pointer
@@ -679,8 +758,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
- static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
-
+ uint64_t mask = RADEON_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
unsigned count = 0;
uint64_t addr;
@@ -690,7 +768,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
- uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+ uint64_t pt_idx = addr >> radeon_vm_block_size;
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
@@ -708,10 +786,9 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
if ((last_pte + 8 * count) != pte) {
if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pte,
- last_dst, count,
- RADEON_GPU_PAGE_SIZE,
- flags);
+ radeon_vm_frag_ptes(rdev, ib, last_pte,
+ last_pte + 8 * count,
+ last_dst, flags);
}
count = nptes;
@@ -726,9 +803,9 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
if (count) {
- radeon_asic_vm_set_page(rdev, ib, last_pte,
- last_dst, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_frag_ptes(rdev, ib, last_pte,
+ last_pte + 8 * count,
+ last_dst, flags);
}
}
@@ -796,13 +873,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
/* padding, etc. */
ndw = 64;
- if (RADEON_VM_BLOCK_SIZE > 11)
+ if (radeon_vm_block_size > 11)
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 4;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+ ndw += (nptes >> radeon_vm_block_size) * 4;
/* reserve space for pte addresses */
ndw += nptes * 2;
@@ -892,6 +969,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
*/
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
+ const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
+ RADEON_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries, pts_size;
int r;
@@ -913,7 +992,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
return -ENOMEM;
}
- r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
+ r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&vm->page_directory);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 130d5cc50d43..a0f96decece3 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,21 +212,16 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
- if (i < 0 || i > rdev->gart.num_gpu_pages) {
- return -EINVAL;
- }
-
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
gtt[i] = entry;
- return 0;
}
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 72d3616de08e..d1a35cb1c91d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -109,19 +109,7 @@ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
-void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* enable the pflip int */
- radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
-{
- /* disable the pflip int */
- radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -148,9 +136,15 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+ return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
}
void avivo_program_fmt(struct drm_encoder *encoder)
@@ -632,24 +626,16 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-#define R600_PTE_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
- if (i < 0 || i > rdev->gart.num_gpu_pages) {
- return -EINVAL;
- }
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ if (addr == rdev->dummy_page.addr)
+ addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ else
+ addr |= R600_PTE_GART;
writeq(addr, ptr + (i * 8));
- return 0;
}
int rs600_irq_set(struct radeon_device *rdev)
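(Note: R600_PTE_GART is defined outside this hunk; presumably it just bundles the flags that were previously OR'ed in unconditionally, along the lines of:)

	#define R600_PTE_GART	(R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED | \
				 R600_PTE_READABLE | R600_PTE_WRITEABLE)

(The dummy page, by contrast, keeps only SYSTEM|SNOOPED and is not marked valid.)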
@@ -787,7 +773,7 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[1]) {
@@ -796,7 +782,7 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index fef310773aad..da8703d8d455 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,7 +801,7 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
-u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -835,9 +835,15 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+ return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
}
/* get temperature in millidegrees */
@@ -1321,6 +1327,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
if (tmp < rdev->config.rv770.max_simds) {
rdev->config.rv770.max_simds = tmp;
}
+ tmp = rdev->config.rv770.max_simds -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+ rdev->config.rv770.active_simds = tmp;
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index da041a43d82e..3c76e1dcdf04 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
- /* disable ss, causes hangs on some cayman boards */
- if (rdev->family == CHIP_CAYMAN) {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- }
-
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 22a63c98ba14..eba0225259a4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -71,6 +71,7 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
@@ -2900,7 +2901,7 @@ static void si_gpu_init(struct radeon_device *rdev)
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j;
+ int i, j, k;
switch (rdev->family) {
case CHIP_TAHITI:
@@ -3098,6 +3099,14 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
+ for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
+ for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
+ for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
+ rdev->config.si.active_cus +=
+ hweight32(si_get_cu_active_bitmap(rdev, i, j));
+ }
+ }
+ }
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -3186,7 +3195,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
@@ -3219,7 +3228,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (1 << 8));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
radeon_ring_write(ring, next_rptr);
}
@@ -4044,18 +4053,21 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ BANK_SELECT(4) |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(4));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -4092,6 +4104,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -6151,7 +6164,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -6177,7 +6190,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -6203,7 +6216,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_flip(rdev, 2);
+ radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
@@ -6229,7 +6242,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_flip(rdev, 3);
+ radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
@@ -6255,7 +6268,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_flip(rdev, 4);
+ radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
@@ -6281,7 +6294,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_flip(rdev, 5);
+ radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
@@ -6363,14 +6376,16 @@ restart_ih:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ if (addr == 0x0 && status == 0x0)
+ break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
si_vm_decode_fault(rdev, status, addr);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index de0ca070122f..e24c94b6d14d 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -79,7 +79,25 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
trace_radeon_vm_set_page(pe, addr, count, incr, flags);
- if (flags & R600_PTE_SYSTEM) {
+ if (flags == R600_PTE_GART) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0xFFFF8)
+ bytes = 0xFFFF8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+ } else if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
@@ -202,8 +220,8 @@ int si_copy_dma(struct radeon_device *rdev,
cur_size_in_bytes = 0xFFFFF;
size_in_bytes -= cur_size_in_bytes;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
- radeon_ring_write(ring, dst_offset & 0xffffffff);
- radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(dst_offset));
+ radeon_ring_write(ring, lower_32_bits(src_offset));
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
src_offset += cur_size_in_bytes;
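(Note on the 0xFFFF8 cap in the new GART-copy branch above: the DMA copy packet's byte count tops out at 0xFFFFF, and 0xFFFF8 = 0xFFFFF & ~7 is the largest such value covering whole 8-byte PTEs, i.e. up to 0xFFFF8 / 8 = 131071 PTEs per packet.)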
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 9a3567bedaae..58918868f894 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1948,6 +1948,10 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_cape_verde;
break;
+ case 0x682C:
+ si_pi->cac_weights = cac_weights_cape_verde_pro;
+ si_pi->dte_data = dte_data_sun_xt;
+ break;
case 0x6825:
case 0x6827:
si_pi->cac_weights = cac_weights_heathrow;
@@ -1971,10 +1975,9 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
si_pi->dte_data = dte_data_venus_xt;
break;
case 0x6823:
- si_pi->cac_weights = cac_weights_chelsea_pro;
- si_pi->dte_data = dte_data_venus_pro;
- break;
case 0x682B:
+ case 0x6822:
+ case 0x682A:
si_pi->cac_weights = cac_weights_chelsea_pro;
si_pi->dte_data = dte_data_venus_pro;
break;
@@ -1988,6 +1991,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x6601:
case 0x6621:
case 0x6603:
+ case 0x6605:
si_pi->cac_weights = cac_weights_mars_pro;
si_pi->lcac_config = lcac_mars_pro;
si_pi->cac_override = cac_override_oland;
@@ -1998,6 +2002,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x6600:
case 0x6606:
case 0x6620:
+ case 0x6604:
si_pi->cac_weights = cac_weights_mars_xt;
si_pi->lcac_config = lcac_mars_pro;
si_pi->cac_override = cac_override_oland;
@@ -2006,6 +2011,8 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
update_dte_from_pl2 = true;
break;
case 0x6611:
+ case 0x6613:
+ case 0x6608:
si_pi->cac_weights = cac_weights_oland_pro;
si_pi->lcac_config = lcac_mars_pro;
si_pi->cac_override = cac_override_oland;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7321283602ce..fd414d34d885 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -362,6 +362,7 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2a2822c03329..20da6ff183df 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,7 +1874,15 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- pi->enable_bapm = false;
+ /* There are stability issues reported on laptops with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some desktop boards hang
+ * if it's not enabled and dpm is enabled.
+ */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ pi->enable_bapm = false;
+ else
+ pi->enable_bapm = true;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index d1771004cb52..8bfdadd56598 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -45,7 +45,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index d8e835ac2c5e..2e3d7b5b0ad7 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,7 @@
config DRM_RCAR_DU
tristate "DRM Support for R-Car Display Unit"
depends on DRM && ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
@@ -12,6 +13,7 @@ config DRM_RCAR_DU
config DRM_RCAR_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_RCAR_DU
+ depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
help
Enable support for the R-Car Display Unit embedded LVDS encoders
(currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 4f3ba93cd91d..289048d1c7b2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -57,15 +57,8 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
return 1;
}
-static int rcar_du_lvds_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
- .mode_valid = rcar_du_lvds_connector_mode_valid,
.best_encoder = rcar_du_connector_best_encoder,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 41d563adfeaa..ccfe64c7188f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -25,15 +25,8 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
return 0;
}
-static int rcar_du_vga_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_vga_connector_get_modes,
- .mode_valid = rcar_du_vga_connector_mode_valid,
.best_encoder = rcar_du_connector_best_encoder,
};
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index d2b2df9e26f3..c97cdc9ab239 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1079,4 +1079,4 @@ const struct drm_ioctl_desc savage_ioctls[] = {
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
-int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
+int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);
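DRM_ARRAY_SIZE() was a thin wrapper around the generic kernel ARRAY_SIZE(), so this conversion and the matching one in sis_mm.c below are mechanical. A simplified, runnable sketch of the idiom (the real kernel macro additionally rejects pointer arguments at compile time via __must_be_array()):

#include <stdio.h>

/* Simplified form of the kernel's ARRAY_SIZE() macro: element count
 * of a true array, computed at compile time. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const int ioctls[] = { 1, 2, 3, 4 };

int main(void)
{
	printf("%zu\n", ARRAY_SIZE(ioctls)); /* prints 4 */
	return 0;
}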
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 2ee44ca9d67f..a50fe0eeaa0d 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,7 @@
config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
- depends on DRM && (ARM || SUPERH)
+ depends on DRM && ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e9e5e6d368cc..faf176b2daf9 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -674,12 +674,6 @@ static int shmob_drm_connector_get_modes(struct drm_connector *connector)
return 1;
}
-static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *
shmob_drm_connector_best_encoder(struct drm_connector *connector)
{
@@ -690,7 +684,6 @@ shmob_drm_connector_best_encoder(struct drm_connector *connector)
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = shmob_drm_connector_get_modes,
- .mode_valid = shmob_drm_connector_mode_valid,
.best_encoder = shmob_drm_connector_best_encoder,
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index c839c9c89efb..82c84c7fd4f6 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -185,7 +185,7 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
goto done;
}
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ handler\n");
goto done;
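drm_irq_install() now takes the IRQ number explicitly instead of digging it out of the device, so platform drivers pass the result of platform_get_irq(), as the shmob_drm hunk above does. A kernel-style sketch of the same pattern with the negative-IRQ check added (the driver and function name are hypothetical, and the snippet is not standalone-compilable):

/* Hypothetical load callback: fetch the platform IRQ, then hand it
 * to drm_irq_install() rather than letting DRM guess it. */
static int example_drm_load(struct drm_device *dev, unsigned long flags)
{
	int irq = platform_get_irq(dev->platformdev, 0);

	if (irq < 0)
		return irq;  /* e.g. -ENXIO if the IRQ resource is missing */

	return drm_irq_install(dev, irq);
}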
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 0573be0d2933..77f288e4a0a6 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -359,4 +359,4 @@ const struct drm_ioctl_desc sis_ioctls[] = {
DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
-int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
+int sis_max_ioctl = ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index d43f21bb4596..2c66a8db9da4 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -1,7 +1,6 @@
ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
tegra-drm-y := \
- bus.o \
drm.o \
gem.o \
fb.o \
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
deleted file mode 100644
index 71cef5c13dc8..000000000000
--- a/drivers/gpu/drm/tegra/bus.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2013 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "drm.h"
-
-static int drm_host1x_set_busid(struct drm_device *dev,
- struct drm_master *master)
-{
- const char *device = dev_name(dev->dev);
- const char *driver = dev->driver->name;
- const char *bus = dev->dev->bus->name;
- int length;
-
- master->unique_len = strlen(bus) + 1 + strlen(device);
- master->unique_size = master->unique_len;
-
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
- if (!master->unique)
- return -ENOMEM;
-
- snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
-
- length = strlen(driver) + 1 + master->unique_len;
-
- dev->devname = kmalloc(length + 1, GFP_KERNEL);
- if (!dev->devname)
- return -ENOMEM;
-
- snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
-
- return 0;
-}
-
-static struct drm_bus drm_host1x_bus = {
- .bus_type = DRIVER_BUS_HOST1X,
- .set_busid = drm_host1x_set_busid,
-};
-
-int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
-{
- struct drm_device *drm;
- int ret;
-
- driver->bus = &drm_host1x_bus;
-
- drm = drm_dev_alloc(driver, &device->dev);
- if (!drm)
- return -ENOMEM;
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_free;
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
- driver->major, driver->minor, driver->patchlevel,
- driver->date, drm->primary->index);
-
- return 0;
-
-err_free:
- drm_dev_unref(drm);
- return ret;
-}
-
-void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
-{
- struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
-
- drm_put_dev(tegra->drm);
-}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index edb871d7d395..ef40381f3909 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -17,6 +17,7 @@
struct tegra_dc_soc_info {
bool supports_interlacing;
+ bool supports_cursor;
};
struct tegra_plane {
@@ -29,6 +30,254 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
return container_of(plane, struct tegra_plane, base);
}
+static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
+{
+ /* assume no swapping of fetched data */
+ if (swap)
+ *swap = BYTE_SWAP_NOSWAP;
+
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ return WIN_COLOR_DEPTH_R8G8B8A8;
+
+ case DRM_FORMAT_XRGB8888:
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+
+ case DRM_FORMAT_RGB565:
+ return WIN_COLOR_DEPTH_B5G6R5;
+
+ case DRM_FORMAT_UYVY:
+ return WIN_COLOR_DEPTH_YCbCr422;
+
+ case DRM_FORMAT_YUYV:
+ if (swap)
+ *swap = BYTE_SWAP_SWAP2;
+
+ return WIN_COLOR_DEPTH_YCbCr422;
+
+ case DRM_FORMAT_YUV420:
+ return WIN_COLOR_DEPTH_YCbCr420P;
+
+ case DRM_FORMAT_YUV422:
+ return WIN_COLOR_DEPTH_YCbCr422P;
+
+ default:
+ break;
+ }
+
+ WARN(1, "unsupported pixel format %u, using default\n", format);
+ return WIN_COLOR_DEPTH_B8G8R8A8;
+}
+
+static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_YCbCr422:
+ case WIN_COLOR_DEPTH_YUV422:
+ if (planar)
+ *planar = false;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCbCr420P:
+ case WIN_COLOR_DEPTH_YUV420P:
+ case WIN_COLOR_DEPTH_YCbCr422P:
+ case WIN_COLOR_DEPTH_YUV422P:
+ case WIN_COLOR_DEPTH_YCbCr422R:
+ case WIN_COLOR_DEPTH_YUV422R:
+ case WIN_COLOR_DEPTH_YCbCr422RA:
+ case WIN_COLOR_DEPTH_YUV422RA:
+ if (planar)
+ *planar = true;
+
+ return true;
+ }
+
+ return false;
+}
+
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
+ unsigned int bpp)
+{
+ fixed20_12 outf = dfixed_init(out);
+ fixed20_12 inf = dfixed_init(in);
+ u32 dda_inc;
+ int max;
+
+ if (v)
+ max = 15;
+ else {
+ switch (bpp) {
+ case 2:
+ max = 8;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ /* fallthrough */
+ case 4:
+ max = 4;
+ break;
+ }
+ }
+
+ outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+ inf.full -= dfixed_const(1);
+
+ dda_inc = dfixed_div(inf, outf);
+ dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+ return dda_inc;
+}
+
+static inline u32 compute_initial_dda(unsigned int in)
+{
+ fixed20_12 inf = dfixed_init(in);
+ return dfixed_frac(inf);
+}
+
+static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+ const struct tegra_dc_window *window)
+{
+ unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+ unsigned long value;
+ bool yuv, planar;
+
+ /*
+ * For YUV planar modes, the number of bytes per pixel takes into
+ * account only the luma component and therefore is 1.
+ */
+ yuv = tegra_dc_format_is_yuv(window->format, &planar);
+ if (!yuv)
+ bpp = window->bits_per_pixel / 8;
+ else
+ bpp = planar ? 1 : 2;
+
+ value = WINDOW_A_SELECT << index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, window->swap, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ h_offset = window->src.x * bpp;
+ v_offset = window->src.y;
+ h_size = window->src.w * bpp;
+ v_size = window->src.h;
+
+ value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ /*
+ * For DDA computations the number of bytes per pixel for YUV planar
+ * modes needs to take into account all Y, U and V components.
+ */
+ if (yuv && planar)
+ bpp = 2;
+
+ h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+ v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(window->src.x);
+ v_dda = compute_initial_dda(window->src.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+
+ if (yuv && planar) {
+ tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+ value = window->stride[1] << 16 | window->stride[0];
+ tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+ }
+
+ if (window->bottom_up)
+ v_offset += window->src.h - 1;
+
+ tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+ if (window->tiled) {
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ } else {
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+ value = WIN_ENABLE;
+
+ if (yuv) {
+ /* setup default colorspace conversion coefficients */
+ tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+
+ value |= CSC_ENABLE;
+ } else if (window->bits_per_pixel < 24) {
+ value |= COLOR_EXPAND;
+ }
+
+ if (window->bottom_up)
+ value |= V_DIRECTION;
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ /*
+ * Disable blending and assume Window A is the bottom-most window,
+ * Window C is the top-most window and Window B is in the middle.
+ */
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
+
+ switch (index) {
+ case 0:
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 1:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 2:
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
+ break;
+ }
+
+ tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x,
int crtc_y, unsigned int crtc_w,
@@ -49,7 +298,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.dst.y = crtc_y;
window.dst.w = crtc_w;
window.dst.h = crtc_h;
- window.format = tegra_dc_format(fb->pixel_format);
+ window.format = tegra_dc_format(fb->pixel_format, &window.swap);
window.bits_per_pixel = fb->bits_per_pixel;
window.bottom_up = tegra_fb_is_bottom_up(fb);
window.tiled = tegra_fb_is_tiled(fb);
@@ -117,6 +366,7 @@ static const uint32_t plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
};
@@ -150,9 +400,9 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct drm_framebuffer *fb)
{
- unsigned int format = tegra_dc_format(fb->pixel_format);
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
+ unsigned int format, swap;
unsigned long value;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -162,7 +412,10 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+
+ format = tegra_dc_format(fb->pixel_format, &swap);
tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
if (tegra_fb_is_tiled(fb)) {
value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
@@ -177,13 +430,13 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
/* make sure bottom-up buffers are properly displayed */
if (tegra_fb_is_bottom_up(fb)) {
value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value |= INVERT_V;
+ value |= V_DIRECTION;
tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
v_offset += fb->height - 1;
} else {
value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value &= ~INVERT_V;
+ value &= ~V_DIRECTION;
tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
}
@@ -225,6 +478,109 @@ void tegra_dc_disable_vblank(struct tegra_dc *dc)
spin_unlock_irqrestore(&dc->lock, flags);
}
+static int tegra_dc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file,
+ uint32_t handle, uint32_t width,
+ uint32_t height, int32_t hot_x, int32_t hot_y)
+{
+ unsigned long value = CURSOR_CLIP_DISPLAY;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo = NULL;
+
+ if (!dc->soc->supports_cursor)
+ return -ENXIO;
+
+ if (width != height)
+ return -EINVAL;
+
+ switch (width) {
+ case 32:
+ value |= CURSOR_SIZE_32x32;
+ break;
+
+ case 64:
+ value |= CURSOR_SIZE_64x64;
+ break;
+
+ case 128:
+ value |= CURSOR_SIZE_128x128;
+ break;
+
+ case 256:
+ value |= CURSOR_SIZE_256x256;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (handle) {
+ gem = drm_gem_object_lookup(crtc->dev, file, handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ }
+
+ if (bo) {
+ unsigned long addr = (bo->paddr & 0xfffffc00) >> 10;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ unsigned long high = (bo->paddr & 0xfffffffc) >> 32;
+#endif
+
+ tegra_dc_writel(dc, value | addr, DC_DISP_CURSOR_START_ADDR);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ tegra_dc_writel(dc, high, DC_DISP_CURSOR_START_ADDR_HI);
+#endif
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
+ value &= ~CURSOR_DST_BLEND_MASK;
+ value &= ~CURSOR_SRC_BLEND_MASK;
+ value |= CURSOR_MODE_NORMAL;
+ value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
+ value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
+ value |= CURSOR_ALPHA;
+ tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+ } else {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ }
+
+ tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
+static int tegra_dc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned long value;
+
+ if (!dc->soc->supports_cursor)
+ return -ENXIO;
+
+ value = ((y & 0x3fff) << 16) | (x & 0x3fff);
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
+
+ tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* XXX: only required on generations earlier than Tegra124? */
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ return 0;
+}
+
static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
{
struct drm_device *drm = dc->base.dev;
@@ -301,6 +657,8 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .cursor_set2 = tegra_dc_cursor_set2,
+ .cursor_move = tegra_dc_cursor_move,
.page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = tegra_dc_destroy,
@@ -334,52 +692,11 @@ static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
- unsigned int bpp)
-{
- fixed20_12 outf = dfixed_init(out);
- fixed20_12 inf = dfixed_init(in);
- u32 dda_inc;
- int max;
-
- if (v)
- max = 15;
- else {
- switch (bpp) {
- case 2:
- max = 8;
- break;
-
- default:
- WARN_ON_ONCE(1);
- /* fallthrough */
- case 4:
- max = 4;
- break;
- }
- }
-
- outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
- inf.full -= dfixed_const(1);
-
- dda_inc = dfixed_div(inf, outf);
- dda_inc = min_t(u32, dda_inc, dfixed_const(max));
-
- return dda_inc;
-}
-
-static inline u32 compute_initial_dda(unsigned int in)
-{
- fixed20_12 inf = dfixed_init(in);
- return dfixed_frac(inf);
-}
-
static int tegra_dc_set_timings(struct tegra_dc *dc,
struct drm_display_mode *mode)
{
- /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
- unsigned int h_ref_to_sync = 0;
- unsigned int v_ref_to_sync = 0;
+ unsigned int h_ref_to_sync = 1;
+ unsigned int v_ref_to_sync = 1;
unsigned long value;
tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
@@ -406,13 +723,14 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
}
static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- unsigned long *div)
+ struct drm_display_mode *mode)
{
- unsigned long pclk = mode->clock * 1000, rate;
+ unsigned long pclk = mode->clock * 1000;
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_output *output = NULL;
struct drm_encoder *encoder;
+ unsigned int div;
+ u32 value;
long err;
list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
@@ -425,221 +743,23 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
return -ENODEV;
/*
- * This assumes that the display controller will divide its parent
- * clock by 2 to generate the pixel clock.
+ * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+ * respectively, each of which divides the base pll_d by 2.
*/
- err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+ err = tegra_output_setup_clock(output, dc->clk, pclk, &div);
if (err < 0) {
dev_err(dc->dev, "failed to setup clock: %ld\n", err);
return err;
}
- rate = clk_get_rate(dc->clk);
- *div = (rate * 2 / pclk) - 2;
-
- DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
-
- return 0;
-}
-
-static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
-{
- switch (format) {
- case WIN_COLOR_DEPTH_YCbCr422:
- case WIN_COLOR_DEPTH_YUV422:
- if (planar)
- *planar = false;
-
- return true;
-
- case WIN_COLOR_DEPTH_YCbCr420P:
- case WIN_COLOR_DEPTH_YUV420P:
- case WIN_COLOR_DEPTH_YCbCr422P:
- case WIN_COLOR_DEPTH_YUV422P:
- case WIN_COLOR_DEPTH_YCbCr422R:
- case WIN_COLOR_DEPTH_YUV422R:
- case WIN_COLOR_DEPTH_YCbCr422RA:
- case WIN_COLOR_DEPTH_YUV422RA:
- if (planar)
- *planar = true;
-
- return true;
- }
-
- return false;
-}
-
-int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
- const struct tegra_dc_window *window)
-{
- unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
- unsigned long value;
- bool yuv, planar;
-
- /*
- * For YUV planar modes, the number of bytes per pixel takes into
- * account only the luma component and therefore is 1.
- */
- yuv = tegra_dc_format_is_yuv(window->format, &planar);
- if (!yuv)
- bpp = window->bits_per_pixel / 8;
- else
- bpp = planar ? 1 : 2;
-
- value = WINDOW_A_SELECT << index;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
- tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
-
- value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
- tegra_dc_writel(dc, value, DC_WIN_POSITION);
-
- value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
- tegra_dc_writel(dc, value, DC_WIN_SIZE);
-
- h_offset = window->src.x * bpp;
- v_offset = window->src.y;
- h_size = window->src.w * bpp;
- v_size = window->src.h;
-
- value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
- tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
-
- /*
- * For DDA computations the number of bytes per pixel for YUV planar
- * modes needs to take into account all Y, U and V components.
- */
- if (yuv && planar)
- bpp = 2;
-
- h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
- v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
-
- value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
- tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
-
- h_dda = compute_initial_dda(window->src.x);
- v_dda = compute_initial_dda(window->src.y);
-
- tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
- tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
-
- tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
- tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
-
- tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
-
- if (yuv && planar) {
- tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
- tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
- value = window->stride[1] << 16 | window->stride[0];
- tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
- } else {
- tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
- }
-
- if (window->bottom_up)
- v_offset += window->src.h - 1;
+ DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), div);
- tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
- tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
-
- if (window->tiled) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
- } else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
-
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
-
- value = WIN_ENABLE;
-
- if (yuv) {
- /* setup default colorspace conversion coefficients */
- tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
- tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
- tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
- tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
- tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
- tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
- tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
- tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
-
- value |= CSC_ENABLE;
- } else if (window->bits_per_pixel < 24) {
- value |= COLOR_EXPAND;
- }
-
- if (window->bottom_up)
- value |= INVERT_V;
-
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
- /*
- * Disable blending and assume Window A is the bottom-most window,
- * Window C is the top-most window and Window B is in the middle.
- */
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
-
- switch (index) {
- case 0:
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
- break;
-
- case 1:
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
- break;
-
- case 2:
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
- break;
- }
-
- tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+ value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
return 0;
}
-unsigned int tegra_dc_format(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_XBGR8888:
- return WIN_COLOR_DEPTH_R8G8B8A8;
-
- case DRM_FORMAT_XRGB8888:
- return WIN_COLOR_DEPTH_B8G8R8A8;
-
- case DRM_FORMAT_RGB565:
- return WIN_COLOR_DEPTH_B5G6R5;
-
- case DRM_FORMAT_UYVY:
- return WIN_COLOR_DEPTH_YCbCr422;
-
- case DRM_FORMAT_YUV420:
- return WIN_COLOR_DEPTH_YCbCr420P;
-
- case DRM_FORMAT_YUV422:
- return WIN_COLOR_DEPTH_YCbCr422P;
-
- default:
- break;
- }
-
- WARN(1, "unsupported pixel format %u, using default\n", format);
- return WIN_COLOR_DEPTH_B8G8R8A8;
-}
-
static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted,
@@ -648,12 +768,12 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0);
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_dc_window window;
- unsigned long div, value;
+ u32 value;
int err;
drm_vblank_pre_modeset(crtc->dev, dc->pipe);
- err = tegra_crtc_setup_clk(crtc, mode, &div);
+ err = tegra_crtc_setup_clk(crtc, mode);
if (err) {
dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
return err;
@@ -669,9 +789,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
}
- value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
-
/* setup window parameters */
memset(&window, 0, sizeof(window));
window.src.x = 0;
@@ -682,7 +799,8 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
window.dst.y = 0;
window.dst.w = mode->hdisplay;
window.dst.h = mode->vdisplay;
- window.format = tegra_dc_format(crtc->primary->fb->pixel_format);
+ window.format = tegra_dc_format(crtc->primary->fb->pixel_format,
+ &window.swap);
window.bits_per_pixel = crtc->primary->fb->bits_per_pixel;
window.stride[0] = crtc->primary->fb->pitches[0];
window.base[0] = bo->paddr;
@@ -728,10 +846,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
- value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
/* initialize timer */
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
@@ -991,6 +1105,8 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
DUMP_REG(DC_DISP_SD_BL_CONTROL);
DUMP_REG(DC_DISP_SD_HW_K_VALUES);
DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_HI);
+ DUMP_REG(DC_DISP_BLEND_CURSOR_CONTROL);
DUMP_REG(DC_WIN_WIN_OPTIONS);
DUMP_REG(DC_WIN_BYTE_SWAP);
DUMP_REG(DC_WIN_BUFFER_CONTROL);
@@ -1096,26 +1212,26 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
static int tegra_dc_init(struct host1x_client *client)
{
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
- drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
+ drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
- err = tegra_dc_rgb_init(tegra->drm, dc);
+ err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
return err;
}
- err = tegra_dc_add_planes(tegra->drm, dc);
+ err = tegra_dc_add_planes(drm, dc);
if (err < 0)
return err;
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
+ err = tegra_dc_debugfs_init(dc, drm->primary);
if (err < 0)
dev_err(dc->dev, "debugfs setup failed: %d\n", err);
}
@@ -1160,14 +1276,17 @@ static const struct host1x_client_ops dc_client_ops = {
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_interlacing = false,
+ .supports_cursor = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_interlacing = false,
+ .supports_cursor = false,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_interlacing = true,
+ .supports_cursor = true,
};
static const struct of_device_id tegra_dc_of_match[] = {
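The new supports_cursor flag follows the same per-SoC capability pattern as supports_interlacing: a const table per chip generation, consulted at the top of each entry point. A small runnable analogue of that pattern (the names are illustrative; the -ENXIO return matches the cursor hooks above):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-SoC capability table, mirroring tegra_dc_soc_info. */
struct soc_info {
	bool supports_interlacing;
	bool supports_cursor;
};

static const struct soc_info tegra30  = { false, false };
static const struct soc_info tegra124 = { true,  true  };

static int cursor_move(const struct soc_info *soc, int x, int y)
{
	if (!soc->supports_cursor)
		return -ENXIO;  /* same error the driver hooks return */
	printf("cursor -> (%d, %d)\n", x, y);
	return 0;
}

int main(void)
{
	printf("%d\n", cursor_move(&tegra30, 10, 20));   /* -6 (ENXIO) */
	printf("%d\n", cursor_move(&tegra124, 10, 20));  /* 0 */
	return 0;
}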
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index c94101494826..78c5feff95d2 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -67,10 +67,12 @@
#define WIN_A_ACT_REQ (1 << 1)
#define WIN_B_ACT_REQ (1 << 2)
#define WIN_C_ACT_REQ (1 << 3)
+#define CURSOR_ACT_REQ (1 << 7)
#define GENERAL_UPDATE (1 << 8)
#define WIN_A_UPDATE (1 << 9)
#define WIN_B_UPDATE (1 << 10)
#define WIN_C_UPDATE (1 << 11)
+#define CURSOR_UPDATE (1 << 15)
#define NC_HOST_TRIG (1 << 24)
#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
@@ -116,9 +118,10 @@
#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
#define DC_DISP_DISP_WIN_OPTIONS 0x402
-#define HDMI_ENABLE (1 << 30)
-#define DSI_ENABLE (1 << 29)
-#define SOR_ENABLE (1 << 25)
+#define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE (1 << 29)
+#define SOR_ENABLE (1 << 25)
+#define CURSOR_ENABLE (1 << 16)
#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
@@ -266,6 +269,14 @@
#define DC_DISP_CURSOR_BACKGROUND 0x43d
#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define CURSOR_CLIP_DISPLAY (0 << 28)
+#define CURSOR_CLIP_WIN_A (1 << 28)
+#define CURSOR_CLIP_WIN_B (2 << 28)
+#define CURSOR_CLIP_WIN_C (3 << 28)
+#define CURSOR_SIZE_32x32 (0 << 24)
+#define CURSOR_SIZE_64x64 (1 << 24)
+#define CURSOR_SIZE_128x128 (2 << 24)
+#define CURSOR_SIZE_256x256 (3 << 24)
#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
#define DC_DISP_CURSOR_POSITION 0x440
@@ -302,6 +313,19 @@
#define INTERLACE_START (1 << 1)
#define INTERLACE_ENABLE (1 << 0)
+#define DC_DISP_CURSOR_START_ADDR_HI 0x4ec
+#define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1
+#define CURSOR_MODE_LEGACY (0 << 24)
+#define CURSOR_MODE_NORMAL (1 << 24)
+#define CURSOR_DST_BLEND_ZERO (0 << 16)
+#define CURSOR_DST_BLEND_K1 (1 << 16)
+#define CURSOR_DST_BLEND_NEG_K1_TIMES_SRC (2 << 16)
+#define CURSOR_DST_BLEND_MASK (3 << 16)
+#define CURSOR_SRC_BLEND_K1 (0 << 8)
+#define CURSOR_SRC_BLEND_K1_TIMES_SRC (1 << 8)
+#define CURSOR_SRC_BLEND_MASK (3 << 8)
+#define CURSOR_ALPHA 0xff
+
#define DC_WIN_CSC_YOF 0x611
#define DC_WIN_CSC_KYRGB 0x612
#define DC_WIN_CSC_KUR 0x613
@@ -312,7 +336,8 @@
#define DC_WIN_CSC_KVB 0x618
#define DC_WIN_WIN_OPTIONS 0x700
-#define INVERT_V (1 << 2)
+#define H_DIRECTION (1 << 0)
+#define V_DIRECTION (1 << 2)
#define COLOR_EXPAND (1 << 6)
#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 005c19bd92df..3f132e356e9c 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
@@ -41,6 +42,7 @@ struct tegra_dpaux {
struct regulator *vdd;
struct completion complete;
+ struct work_struct work;
struct list_head list;
};
@@ -49,6 +51,11 @@ static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
return container_of(aux, struct tegra_dpaux, aux);
}
+static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
+{
+ return container_of(work, struct tegra_dpaux, work);
+}
+
static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux,
unsigned long offset)
{
@@ -231,6 +238,14 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
return ret;
}
+static void tegra_dpaux_hotplug(struct work_struct *work)
+{
+ struct tegra_dpaux *dpaux = work_to_dpaux(work);
+
+ if (dpaux->output)
+ drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+}
+
static irqreturn_t tegra_dpaux_irq(int irq, void *data)
{
struct tegra_dpaux *dpaux = data;
@@ -241,16 +256,8 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
- if (value & DPAUX_INTR_PLUG_EVENT) {
- if (dpaux->output) {
- drm_helper_hpd_irq_event(dpaux->output->connector.dev);
- }
- }
-
- if (value & DPAUX_INTR_UNPLUG_EVENT) {
- if (dpaux->output)
- drm_helper_hpd_irq_event(dpaux->output->connector.dev);
- }
+ if (value & (DPAUX_INTR_PLUG_EVENT | DPAUX_INTR_UNPLUG_EVENT))
+ schedule_work(&dpaux->work);
if (value & DPAUX_INTR_IRQ_EVENT) {
/* TODO: handle this */
@@ -273,6 +280,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
+ INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
dpaux->dev = &pdev->dev;
@@ -332,7 +340,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->aux.transfer = tegra_dpaux_transfer;
dpaux->aux.dev = &pdev->dev;
- err = drm_dp_aux_register_i2c_bus(&dpaux->aux);
+ err = drm_dp_aux_register(&dpaux->aux);
if (err < 0)
return err;
@@ -355,12 +363,14 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
- drm_dp_aux_unregister_i2c_bus(&dpaux->aux);
+ drm_dp_aux_unregister(&dpaux->aux);
mutex_lock(&dpaux_lock);
list_del(&dpaux->list);
mutex_unlock(&dpaux_lock);
+ cancel_work_sync(&dpaux->work);
+
clk_disable_unprepare(dpaux->clk_parent);
reset_control_assert(dpaux->rst);
clk_disable_unprepare(dpaux->clk);
@@ -404,6 +414,7 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
unsigned long timeout;
int err;
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
err = regulator_enable(dpaux->vdd);
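The dpaux change above moves drm_helper_hpd_irq_event() out of hard-IRQ context into a work item, since the helper takes mutexes and may sleep. A kernel-style sketch of this defer-to-workqueue pattern (the driver names are hypothetical and the snippet is not standalone-compilable; INIT_WORK(), schedule_work() and cancel_work_sync() are the real workqueue API):

/* Hypothetical device: the hard-IRQ handler only acknowledges the
 * event and schedules a work item; the sleeping work runs later in
 * process context. */
struct example_dev {
	struct work_struct work;
};

static void example_hotplug_work(struct work_struct *work)
{
	struct example_dev *ed = container_of(work, struct example_dev, work);

	/* safe to sleep or take mutexes here */
	(void)ed;
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_dev *ed = data;

	schedule_work(&ed->work);  /* defer; never sleep in hard IRQ */
	return IRQ_HANDLED;
}

/* probe:  INIT_WORK(&ed->work, example_hotplug_work);
 * remove: cancel_work_sync(&ed->work);  -- as the hunks above do */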
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6f5b6e2f552e..3396f9f6a9f7 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -33,7 +33,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (!tegra)
return -ENOMEM;
- dev_set_drvdata(drm->dev, tegra);
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
drm->dev_private = tegra;
@@ -640,14 +639,40 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-static int host1x_drm_probe(struct host1x_device *device)
+static int host1x_drm_probe(struct host1x_device *dev)
{
- return drm_host1x_init(&tegra_drm_driver, device);
+ struct drm_driver *driver = &tegra_drm_driver;
+ struct drm_device *drm;
+ int err;
+
+ drm = drm_dev_alloc(driver, &dev->dev);
+ if (!drm)
+ return -ENOMEM;
+
+ drm_dev_set_unique(drm, dev_name(&dev->dev));
+ dev_set_drvdata(&dev->dev, drm);
+
+ err = drm_dev_register(drm, 0);
+ if (err < 0)
+ goto unref;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+ driver->major, driver->minor, driver->patchlevel,
+ driver->date, drm->primary->index);
+
+ return 0;
+
+unref:
+ drm_dev_unref(drm);
+ return err;
}
-static int host1x_drm_remove(struct host1x_device *device)
+static int host1x_drm_remove(struct host1x_device *dev)
{
- drm_host1x_exit(&tegra_drm_driver, device);
+ struct drm_device *drm = dev_get_drvdata(&dev->dev);
+
+ drm_dev_unregister(drm);
+ drm_dev_unref(drm);
return 0;
}
@@ -666,6 +691,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra114-gr3d", },
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
+ { .compatible = "nvidia,tegra124-hdmi", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 126332c3ecbb..6b8fe9d86ed4 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -80,13 +80,13 @@ host1x_to_drm_client(struct host1x_client *client)
return container_of(client, struct tegra_drm_client, base);
}
-extern int tegra_drm_register_client(struct tegra_drm *tegra,
- struct tegra_drm_client *client);
-extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
- struct tegra_drm_client *client);
+int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
-extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
-extern int tegra_drm_exit(struct tegra_drm *tegra);
+int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+int tegra_drm_exit(struct tegra_drm *tegra);
struct tegra_dc_soc_info;
struct tegra_output;
@@ -156,6 +156,7 @@ struct tegra_dc_window {
} dst;
unsigned int bits_per_pixel;
unsigned int format;
+ unsigned int swap;
unsigned int stride[2];
unsigned long base[3];
bool bottom_up;
@@ -163,19 +164,15 @@ struct tegra_dc_window {
};
/* from dc.c */
-extern unsigned int tegra_dc_format(uint32_t format);
-extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
- const struct tegra_dc_window *window);
-extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
-extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
-extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
- struct drm_file *file);
+void tegra_dc_enable_vblank(struct tegra_dc *dc);
+void tegra_dc_disable_vblank(struct tegra_dc *dc);
+void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
struct tegra_output_ops {
int (*enable)(struct tegra_output *output);
int (*disable)(struct tegra_output *output);
int (*setup_clock)(struct tegra_output *output, struct clk *clk,
- unsigned long pclk);
+ unsigned long pclk, unsigned int *div);
int (*check_mode)(struct tegra_output *output,
struct drm_display_mode *mode,
enum drm_mode_status *status);
@@ -233,10 +230,11 @@ static inline int tegra_output_disable(struct tegra_output *output)
}
static inline int tegra_output_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk)
+ struct clk *clk, unsigned long pclk,
+ unsigned int *div)
{
if (output && output->ops && output->ops->setup_clock)
- return output->ops->setup_clock(output, clk, pclk);
+ return output->ops->setup_clock(output, clk, pclk, div);
return output ? -ENOSYS : -EINVAL;
}
@@ -251,27 +249,21 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
return output ? -ENOSYS : -EINVAL;
}
-/* from bus.c */
-int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
-void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
-
/* from rgb.c */
-extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
-extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
-extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
-extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+int tegra_dc_rgb_probe(struct tegra_dc *dc);
+int tegra_dc_rgb_remove(struct tegra_dc *dc);
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+int tegra_dc_rgb_exit(struct tegra_dc *dc);
/* from output.c */
-extern int tegra_output_probe(struct tegra_output *output);
-extern int tegra_output_remove(struct tegra_output *output);
-extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
-extern int tegra_output_exit(struct tegra_output *output);
+int tegra_output_probe(struct tegra_output *output);
+int tegra_output_remove(struct tegra_output *output);
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+int tegra_output_exit(struct tegra_output *output);
/* from dpaux.c */
-
struct tegra_dpaux;
struct drm_dp_link;
-struct drm_dp_aux;
struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np);
enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux);
@@ -288,10 +280,10 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
-extern int tegra_drm_fb_init(struct drm_device *drm);
-extern void tegra_drm_fb_exit(struct drm_device *drm);
+int tegra_drm_fb_init(struct drm_device *drm);
+void tegra_drm_fb_exit(struct drm_device *drm);
#ifdef CONFIG_DRM_TEGRA_FBDEV
-extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
#endif
extern struct platform_driver tegra_dc_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 0e599f0417c0..bd56f2affa78 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -43,11 +45,15 @@ struct tegra_dsi {
struct drm_minor *minor;
struct dentry *debugfs;
+ unsigned long flags;
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct tegra_mipi_device *mipi;
struct mipi_dsi_host host;
+
+ struct regulator *vdd;
+ bool enabled;
};
static inline struct tegra_dsi *
@@ -244,8 +250,10 @@ static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
#define PKT_LP (1 << 30)
#define NUM_PKT_SEQ 12
-/* non-burst mode with sync-end */
-static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+/*
+ * non-burst mode with sync pulses
+ */
+static const u32 pkt_seq_video_non_burst_sync_pulses[NUM_PKT_SEQ] = {
[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
@@ -280,6 +288,36 @@ static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
};
+/*
+ * non-burst mode with sync events
+ */
+static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
+ [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 1] = 0,
+ [ 2] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 3] = 0,
+ [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
+ PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
+ [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
+ [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
+ PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
+ [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
+};
+
static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
{
struct mipi_dphy_timing timing;
@@ -361,28 +399,70 @@ static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
return 0;
}
+static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
+ enum tegra_dsi_format *fmt)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB888:
+ *fmt = TEGRA_DSI_FORMAT_24P;
+ break;
+
+ case MIPI_DSI_FMT_RGB666:
+ *fmt = TEGRA_DSI_FORMAT_18NP;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ *fmt = TEGRA_DSI_FORMAT_18P;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ *fmt = TEGRA_DSI_FORMAT_16P;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int tegra_output_dsi_enable(struct tegra_output *output)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct drm_display_mode *mode = &dc->base.mode;
unsigned int hact, hsw, hbp, hfp, i, mul, div;
struct tegra_dsi *dsi = to_dsi(output);
- /* FIXME: don't hardcode this */
- const u32 *pkt_seq = pkt_seq_vnb_syne;
+ enum tegra_dsi_format format;
unsigned long value;
+ const u32 *pkt_seq;
int err;
+ if (dsi->enabled)
+ return 0;
+
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
+ pkt_seq = pkt_seq_video_non_burst_sync_pulses;
+ } else {
+ DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
+ pkt_seq = pkt_seq_video_non_burst_sync_events;
+ }
+
err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
if (err < 0)
return err;
+ err = tegra_dsi_get_format(dsi->format, &format);
+ if (err < 0)
+ return err;
+
err = clk_enable(dsi->clk);
if (err < 0)
return err;
reset_control_deassert(dsi->rst);
- value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) |
+ value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
DSI_CONTROL_LANES(dsi->lanes - 1) |
DSI_CONTROL_SOURCE(dc->pipe);
tegra_dsi_writel(dsi, value, DSI_CONTROL);
@@ -454,6 +534,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
value |= DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+ dsi->enabled = true;
+
return 0;
}
@@ -463,9 +545,12 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
struct tegra_dsi *dsi = to_dsi(output);
unsigned long value;
+ if (!dsi->enabled)
+ return 0;
+
/* disable DSI controller */
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
- value &= DSI_POWER_CONTROL_ENABLE;
+ value &= ~DSI_POWER_CONTROL_ENABLE;
tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
/*
@@ -492,30 +577,44 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
clk_disable(dsi->clk);
+ dsi->enabled = false;
+
return 0;
}
static int tegra_output_dsi_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk)
+ struct clk *clk, unsigned long pclk,
+ unsigned int *divp)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct drm_display_mode *mode = &dc->base.mode;
unsigned int timeout, mul, div, vrefresh;
struct tegra_dsi *dsi = to_dsi(output);
unsigned long bclk, plld, value;
- struct clk *base;
int err;
err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
if (err < 0)
return err;
+ DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, dsi->lanes);
vrefresh = drm_mode_vrefresh(mode);
+ DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
- pclk = mode->htotal * mode->vtotal * vrefresh;
+ /* compute byte clock */
bclk = (pclk * mul) / (div * dsi->lanes);
- plld = DIV_ROUND_UP(bclk * 8, 1000000);
- pclk = (plld * 1000000) / 2;
+
+ /*
+ * Compute bit clock and round up to the next MHz.
+ */
+ plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000;
+
+ /*
+ * We divide the frequency by two here, but we make up for that by
+ * setting the shift clock divider (further below) to half of the
+ * correct value.
+ */
+ plld /= 2;
err = clk_set_parent(clk, dsi->clk_parent);
if (err < 0) {
@@ -523,20 +622,26 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
return err;
}
- base = clk_get_parent(dsi->clk_parent);
-
- /*
- * This assumes that the parent clock is pll_d_out0 or pll_d2_out
- * respectively, each of which divides the base pll_d by 2.
- */
- err = clk_set_rate(base, pclk * 2);
+ err = clk_set_rate(dsi->clk_parent, plld);
if (err < 0) {
dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
- pclk * 2);
+ plld);
return err;
}
/*
+ * Derive pixel clock from bit clock using the shift clock divider.
+ * Note that this is only half of what we would expect, but we need
+ * that to make up for the fact that we divided the bit clock by a
+ * factor of two above.
+ *
+ * It's not clear exactly why this is necessary, but the display does
+ * not work properly otherwise. Perhaps the PLLs cannot generate
+ * sufficiently high frequencies.
+ */
+ *divp = ((8 * mul) / (div * dsi->lanes)) - 2;
+
+ /*
* XXX: Move the below somewhere else so that we don't need to have
* access to the vrefresh in this function?
*/
@@ -610,61 +715,32 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
static int tegra_dsi_init(struct host1x_client *client)
{
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
- unsigned long value, i;
int err;
dsi->output.type = TEGRA_OUTPUT_DSI;
dsi->output.dev = client->dev;
dsi->output.ops = &dsi_ops;
- err = tegra_output_init(tegra->drm, &dsi->output);
+ err = tegra_output_init(drm, &dsi->output);
if (err < 0) {
dev_err(client->dev, "output setup failed: %d\n", err);
return err;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
+ err = tegra_dsi_debugfs_init(dsi, drm->primary);
if (err < 0)
dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
}
- /*
- * enable high-speed mode, checksum generation, ECC generation and
- * disable raw mode
- */
- value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
- value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
- DSI_HOST_CONTROL_HS;
- value &= ~DSI_HOST_CONTROL_RAW;
- tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
-
- tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
- tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
-
- tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
-
- for (i = 0; i < 8; i++) {
- tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
- tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
- }
-
- for (i = 0; i < 12; i++)
- tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
-
- tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
-
err = tegra_dsi_pad_calibrate(dsi);
if (err < 0) {
dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
return err;
}
- tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
- usleep_range(300, 1000);
-
return 0;
}
@@ -715,66 +791,13 @@ static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
return 0;
}
-static void tegra_dsi_initialize(struct tegra_dsi *dsi)
-{
- unsigned int i;
-
- tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);
-
- tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
- tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
- tegra_dsi_writel(dsi, 0, DSI_INT_MASK);
-
- tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
- tegra_dsi_writel(dsi, 0, DSI_CONTROL);
-
- tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
- tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
-
- tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
-
- for (i = 0; i < 8; i++) {
- tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
- tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
- }
-
- for (i = 0; i < 12; i++)
- tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
-
- tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
-
- for (i = 0; i < 4; i++)
- tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);
-
- tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
- tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
- tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
- tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);
-
- tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
- tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
- tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);
-
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
- tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
-
- tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
- tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
- tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
-}
-
static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct tegra_dsi *dsi = host_to_tegra(host);
struct tegra_output *output = &dsi->output;
+ dsi->flags = device->mode_flags;
dsi->format = device->format;
dsi->lanes = device->lanes;
@@ -829,6 +852,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
* attaches to the DSI host, the parameters will be taken from
* the attached device.
*/
+ dsi->flags = MIPI_DSI_MODE_VIDEO;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
@@ -872,6 +896,18 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return err;
}
+ dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+ if (IS_ERR(dsi->vdd)) {
+ dev_err(&pdev->dev, "cannot get VDD supply\n");
+ return PTR_ERR(dsi->vdd);
+ }
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable VDD supply\n");
+ return err;
+ }
+
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
@@ -883,8 +919,6 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
- tegra_dsi_initialize(dsi);
-
dsi->mipi = tegra_mipi_request(&pdev->dev);
if (IS_ERR(dsi->mipi))
return PTR_ERR(dsi->mipi);
@@ -929,9 +963,11 @@ static int tegra_dsi_remove(struct platform_device *pdev)
mipi_dsi_host_unregister(&dsi->host);
tegra_mipi_free(dsi->mipi);
+ regulator_disable(dsi->vdd);
clk_disable_unprepare(dsi->clk_parent);
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
+ reset_control_assert(dsi->rst);
err = tegra_output_remove(&dsi->output);
if (err < 0) {
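A worked numeric example of the clock math in tegra_output_dsi_setup_clock() above, assuming an RGB888 panel (mul = 3, div = 1, i.e. 3 bytes per pixel) on 4 lanes with a 72 MHz pixel clock; the final computation checks the result against the divider relation pclk = rate * 2 / (div + 2) implied by the dc.c hunk that removed `*div = (rate * 2 / pclk) - 2`:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* assumed parameters: RGB888 => mul = 3, div = 1; 4 DSI lanes */
	unsigned long pclk = 72000000, mul = 3, div = 1, lanes = 4;
	unsigned long bclk, plld;
	unsigned int shift_div;

	bclk = (pclk * mul) / (div * lanes);              /* 54 MHz byte clock  */
	plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000; /* 432 MHz bit clock  */
	plld /= 2;                                        /* 216 MHz, see above */
	shift_div = ((8 * mul) / (div * lanes)) - 2;      /* 4                  */

	/* shift clock divider divides by (n + 2) / 2: 216 MHz * 2 / 6 = 72 MHz */
	printf("bclk=%lu plld=%lu shift_div=%u pclk=%lu\n",
	       bclk, plld, shift_div, plld * 2 / (shift_div + 2));
	return 0;
}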
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 1db5cc24ea91..5ce610d08d77 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -117,4 +117,14 @@
#define DSI_INIT_SEQ_DATA_14 0x5e
#define DSI_INIT_SEQ_DATA_15 0x5f
+/*
+ * pixel format as used in the DSI_CONTROL_FORMAT field
+ */
+enum tegra_dsi_format {
+ TEGRA_DSI_FORMAT_16P,
+ TEGRA_DSI_FORMAT_18NP,
+ TEGRA_DSI_FORMAT_18P,
+ TEGRA_DSI_FORMAT_24P,
+};
+
#endif
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index f7fca09d4921..9798a7080322 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -346,11 +346,8 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
{
- if (fbdev) {
- drm_modeset_lock_all(fbdev->base.dev);
- drm_fb_helper_restore_fbdev_mode(&fbdev->base);
- drm_modeset_unlock_all(fbdev->base.dev);
- }
+ if (fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base);
}
static void tegra_fb_output_poll_changed(struct drm_device *drm)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index bcf9895cef9f..aa85b7b26f10 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -169,7 +169,8 @@ err:
return ERR_PTR(ret);
}
-struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf)
+static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
+ struct dma_buf *buf)
{
struct dma_buf_attachment *attach;
struct tegra_bo *bo;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 2c7ca748edf5..7c53941f2a9e 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -28,7 +28,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
static int gr2d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr2d *gr2d = to_gr2d(drm);
@@ -42,17 +42,17 @@ static int gr2d_init(struct host1x_client *client)
return -ENOMEM;
}
- return tegra_drm_register_client(tegra, drm);
+ return tegra_drm_register_client(dev->dev_private, drm);
}
static int gr2d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
struct gr2d *gr2d = to_gr2d(drm);
int err;
- err = tegra_drm_unregister_client(tegra, drm);
+ err = tegra_drm_unregister_client(dev->dev_private, drm);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 0cbb24b1ae04..30f5ba9bd6d0 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -37,7 +37,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
static int gr3d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr3d *gr3d = to_gr3d(drm);
@@ -51,17 +51,17 @@ static int gr3d_init(struct host1x_client *client)
return -ENOMEM;
}
- return tegra_drm_register_client(tegra, drm);
+ return tegra_drm_register_client(dev->dev_private, drm);
}
static int gr3d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
struct gr3d *gr3d = to_gr3d(drm);
int err;
- err = tegra_drm_unregister_client(tegra, drm);
+ err = tegra_drm_unregister_client(dev->dev_private, drm);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6928015d11a4..ba067bb767e3 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -42,8 +42,9 @@ struct tegra_hdmi {
struct device *dev;
bool enabled;
- struct regulator *vdd;
+ struct regulator *hdmi;
struct regulator *pll;
+ struct regulator *vdd;
void __iomem *regs;
unsigned int irq;
@@ -317,6 +318,85 @@ static const struct tmds_config tegra114_tmds_config[] = {
},
};
+static const struct tmds_config tegra124_tmds_config[] = {
+ { /* 480p/576p / 25.2MHz/27MHz modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 720p / 74.25MHz modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_15_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 1080p / 148.5MHz modes */
+ .pclk = 148500000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_10_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 225/297MHz modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+ | SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+ },
+};
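How this table is consumed is not visible in the hunk; judging by the ascending .pclk keys, the driver presumably picks the first entry whose .pclk is at or above the mode's pixel clock, which is what makes the UINT_MAX entry the catch-all for the 225/297 MHz modes.
/*
 * Inferred selection sketch (loop not part of this hunk):
 *
 *   for (i = 0; i < config->num_tmds; i++)
 *           if (pclk <= config->tmds[i].pclk)
 *                   break;  /* program config->tmds[i] */
 */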
+
static const struct tegra_hdmi_audio_config *
tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
{
@@ -716,13 +796,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
return err;
}
- /*
- * This assumes that the display controller will divide its parent
- * clock by 2 to generate the pixel clock.
- */
- err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+ err = regulator_enable(hdmi->vdd);
if (err < 0) {
- dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+ dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
return err;
}
@@ -730,7 +806,7 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
if (err < 0)
return err;
- err = clk_enable(hdmi->clk);
+ err = clk_prepare_enable(hdmi->clk);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
return err;
@@ -740,6 +816,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
usleep_range(1000, 2000);
reset_control_deassert(hdmi->rst);
+ /* power up sequence */
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
+ value &= ~SOR_PLL_PDBG;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
+
+ usleep_range(10, 20);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
+ value &= ~SOR_PLL_PWR;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
+
tegra_dc_writel(dc, VSYNC_H_POSITION(1),
DC_DISP_DISP_TIMING_OPTIONS);
tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
@@ -838,9 +925,13 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
- value = 0x1c800;
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_CSTM);
value &= ~SOR_CSTM_ROTCLK(~0);
value |= SOR_CSTM_ROTCLK(2);
+ value |= SOR_CSTM_PLLDIV;
+ value &= ~SOR_CSTM_LVDS_ENABLE;
+ value &= ~SOR_CSTM_MODE_MASK;
+ value |= SOR_CSTM_MODE_TMDS;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
/* start SOR */
@@ -930,10 +1021,18 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
* sure it's only executed when the output is attached to one.
*/
if (dc) {
+ /*
+ * XXX: We can't do this here because it causes HDMI to go
+ * into an erroneous state with the result that HDMI won't
+ * properly work once disabled. See also a similar symptom
+ * for the SOR output.
+ */
+ /*
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ */
value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
value &= ~DISP_CTRL_MODE_MASK;
@@ -947,8 +1046,9 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
+ clk_disable_unprepare(hdmi->clk);
reset_control_assert(hdmi->rst);
- clk_disable(hdmi->clk);
+ regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
hdmi->enabled = false;
@@ -957,10 +1057,10 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
}
static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk)
+ struct clk *clk, unsigned long pclk,
+ unsigned int *div)
{
struct tegra_hdmi *hdmi = to_hdmi(output);
- struct clk *base;
int err;
err = clk_set_parent(clk, hdmi->clk_parent);
@@ -969,17 +1069,12 @@ static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
return err;
}
- base = clk_get_parent(hdmi->clk_parent);
-
- /*
- * This assumes that the parent clock is pll_d_out0 or pll_d2_out
- * respectively, each of which divides the base pll_d by 2.
- */
- err = clk_set_rate(base, pclk * 2);
+ err = clk_set_rate(hdmi->clk_parent, pclk);
if (err < 0)
- dev_err(output->dev,
- "failed to set base clock rate to %lu Hz\n",
- pclk * 2);
+ dev_err(output->dev, "failed to set clock rate to %lu Hz\n",
+ pclk);
+
+ *div = 0;
return 0;
}
@@ -1017,7 +1112,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
struct tegra_hdmi *hdmi = node->info_ent->data;
int err;
- err = clk_enable(hdmi->clk);
+ err = clk_prepare_enable(hdmi->clk);
if (err)
return err;
@@ -1186,7 +1281,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
- clk_disable(hdmi->clk);
+ clk_disable_unprepare(hdmi->clk);
return 0;
}
@@ -1252,33 +1347,33 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
static int tegra_hdmi_init(struct host1x_client *client)
{
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
- err = regulator_enable(hdmi->vdd);
- if (err < 0) {
- dev_err(client->dev, "failed to enable VDD regulator: %d\n",
- err);
- return err;
- }
-
hdmi->output.type = TEGRA_OUTPUT_HDMI;
hdmi->output.dev = client->dev;
hdmi->output.ops = &hdmi_ops;
- err = tegra_output_init(tegra->drm, &hdmi->output);
+ err = tegra_output_init(drm, &hdmi->output);
if (err < 0) {
dev_err(client->dev, "output setup failed: %d\n", err);
return err;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
+ err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
if (err < 0)
dev_err(client->dev, "debugfs setup failed: %d\n", err);
}
+ err = regulator_enable(hdmi->hdmi);
+ if (err < 0) {
+ dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
+ err);
+ return err;
+ }
+
return 0;
}
@@ -1287,6 +1382,8 @@ static int tegra_hdmi_exit(struct host1x_client *client)
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
int err;
+ regulator_disable(hdmi->hdmi);
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_hdmi_debugfs_exit(hdmi);
if (err < 0)
@@ -1306,8 +1403,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
return err;
}
- regulator_disable(hdmi->vdd);
-
return 0;
}
@@ -1340,7 +1435,16 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
.has_sor_io_peak_current = true,
};
+static const struct tegra_hdmi_config tegra124_hdmi_config = {
+ .tmds = tegra124_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra124_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = true,
+};
+
static const struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra124-hdmi", .data = &tegra124_hdmi_config },
{ .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
{ .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
@@ -1381,28 +1485,20 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->rst);
}
- err = clk_prepare(hdmi->clk);
- if (err < 0)
- return err;
-
hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(hdmi->clk_parent))
return PTR_ERR(hdmi->clk_parent);
- err = clk_prepare(hdmi->clk_parent);
- if (err < 0)
- return err;
-
err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
return err;
}
- hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(hdmi->vdd)) {
- dev_err(&pdev->dev, "failed to get VDD regulator\n");
- return PTR_ERR(hdmi->vdd);
+ hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
+ if (IS_ERR(hdmi->hdmi)) {
+ dev_err(&pdev->dev, "failed to get HDMI regulator\n");
+ return PTR_ERR(hdmi->hdmi);
}
hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
@@ -1411,6 +1507,12 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->pll);
}
+ hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(hdmi->vdd)) {
+ dev_err(&pdev->dev, "failed to get VDD regulator\n");
+ return PTR_ERR(hdmi->vdd);
+ }
+
hdmi->output.dev = &pdev->dev;
err = tegra_output_probe(&hdmi->output);
@@ -1462,8 +1564,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
return err;
}
- clk_unprepare(hdmi->clk_parent);
- clk_unprepare(hdmi->clk);
+ clk_disable_unprepare(hdmi->clk_parent);
+ clk_disable_unprepare(hdmi->clk);
return 0;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 0aebc485f7fa..919a19df4e1b 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -190,6 +190,11 @@
#define HDMI_NV_PDISP_SOR_CSTM 0x5a
#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+#define SOR_CSTM_PLLDIV (1 << 21)
+#define SOR_CSTM_LVDS_ENABLE (1 << 16)
+#define SOR_CSTM_MODE_LVDS (0 << 12)
+#define SOR_CSTM_MODE_TMDS (1 << 12)
+#define SOR_CSTM_MODE_MASK (3 << 12)
#define HDMI_NV_PDISP_SOR_LVDS 0x5b
#define HDMI_NV_PDISP_SOR_CRCA 0x5c
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 0266fb40479e..d6af9be48f42 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -159,11 +159,38 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
}
static int tegra_output_rgb_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk)
+ struct clk *clk, unsigned long pclk,
+ unsigned int *div)
{
struct tegra_rgb *rgb = to_rgb(output);
+ int err;
+
+ err = clk_set_parent(clk, rgb->clk_parent);
+ if (err < 0) {
+ dev_err(output->dev, "failed to set parent: %d\n", err);
+ return err;
+ }
- return clk_set_parent(clk, rgb->clk_parent);
+ /*
+ * We may not want to change the frequency of the parent clock, since
+ * it may be a parent for other peripherals. This is due to the fact
+ * that on Tegra20 there's only a single clock dedicated to display
+ * (pll_d_out0), whereas later generations have a second one that can
+ * be used to independently drive a second output (pll_d2_out0).
+ *
+ * As a way to support multiple outputs on Tegra20 as well, pll_p is
+ * typically used as the parent clock for the display controllers.
+ * But this comes at a cost: pll_p is the parent of several other
+ * peripherals, so its frequency shouldn't change out of the blue.
+ *
+ * The best we can do at this point is to use the shift clock divider
+ * and hope that the desired frequency can be matched (or at least
+ * matched sufficiently close that the panel will still work).
+ */
+
+ *div = ((clk_get_rate(clk) * 2) / pclk) - 2;
+
+ return 0;
}
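A quick numeric check of the divider formula, with illustrative rates that are not taken from this patch: the expression inverts the shift-clock relation pclk = parent * 2 / (div + 2).
/*
 * Worked example (hypothetical 216 MHz parent, 70 MHz panel clock):
 *
 *   div  = ((216000000 * 2) / 70000000) - 2 = 6 - 2 = 4
 *   pclk = (216000000 * 2) / (4 + 2)        = 72 MHz
 *
 * so the panel runs roughly 3% fast -- the "sufficiently close" match
 * the comment above hopes for.
 */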
static int tegra_output_rgb_check_mode(struct tegra_output *output,
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 49ef5729f435..27c979b50111 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -33,7 +34,23 @@ struct tegra_sor {
struct tegra_dpaux *dpaux;
+ struct mutex lock;
bool enabled;
+
+ struct dentry *debugfs;
+};
+
+struct tegra_sor_config {
+ u32 bits_per_pixel;
+
+ u32 active_polarity;
+ u32 active_count;
+ u32 tu_size;
+ u32 active_frac;
+ u32 watermark;
+
+ u32 hblank_symbols;
+ u32 vblank_symbols;
};
static inline struct tegra_sor *
@@ -289,34 +306,232 @@ static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
return -ETIMEDOUT;
}
+struct tegra_sor_params {
+ /* number of link clocks per line */
+ unsigned int num_clocks;
+ /* ratio between input and output */
+ u64 ratio;
+ /* precision factor */
+ u64 precision;
+
+ unsigned int active_polarity;
+ unsigned int active_count;
+ unsigned int active_frac;
+ unsigned int tu_size;
+ unsigned int error;
+};
+
+static int tegra_sor_compute_params(struct tegra_sor *sor,
+ struct tegra_sor_params *params,
+ unsigned int tu_size)
+{
+ u64 active_sym, active_count, frac, approx;
+ u32 active_polarity, active_frac = 0;
+ const u64 f = params->precision;
+ s64 error;
+
+ active_sym = params->ratio * tu_size;
+ active_count = div_u64(active_sym, f) * f;
+ frac = active_sym - active_count;
+
+ /* fraction < 0.5 */
+ if (frac >= (f / 2)) {
+ active_polarity = 1;
+ frac = f - frac;
+ } else {
+ active_polarity = 0;
+ }
+
+ if (frac != 0) {
+ frac = div_u64(f * f, frac); /* 1/fraction */
+ if (frac <= (15 * f)) {
+ active_frac = div_u64(frac, f);
+
+ /* round up */
+ if (active_polarity)
+ active_frac++;
+ } else {
+ active_frac = active_polarity ? 1 : 15;
+ }
+ }
+
+ if (active_frac == 1)
+ active_polarity = 0;
+
+ if (active_polarity == 1) {
+ if (active_frac) {
+ approx = active_count + (active_frac * (f - 1)) * f;
+ approx = div_u64(approx, active_frac * f);
+ } else {
+ approx = active_count + f;
+ }
+ } else {
+ if (active_frac)
+ approx = active_count + div_u64(f, active_frac);
+ else
+ approx = active_count;
+ }
+
+ error = div_s64(active_sym - approx, tu_size);
+ error *= params->num_clocks;
+
+ if (error <= 0 && abs64(error) < params->error) {
+ params->active_count = div_u64(active_count, f);
+ params->active_polarity = active_polarity;
+ params->active_frac = active_frac;
+ params->error = abs64(error);
+ params->tu_size = tu_size;
+
+ if (error == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_sor_calc_config(struct tegra_sor *sor,
+ struct drm_display_mode *mode,
+ struct tegra_sor_config *config,
+ struct drm_dp_link *link)
+{
+ const u64 f = 100000, link_rate = link->rate * 1000;
+ const u64 pclk = mode->clock * 1000;
+ u64 input, output, watermark, num;
+ struct tegra_sor_params params;
+ u32 num_syms_per_line;
+ unsigned int i;
+
+ if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
+ return -EINVAL;
+
+ output = link_rate * 8 * link->num_lanes;
+ input = pclk * config->bits_per_pixel;
+
+ if (input >= output)
+ return -ERANGE;
+
+ memset(&params, 0, sizeof(params));
+ params.ratio = div64_u64(input * f, output);
+ params.num_clocks = div_u64(link_rate * mode->hdisplay, pclk);
+ params.precision = f;
+ params.error = 64 * f;
+ params.tu_size = 64;
+
+ for (i = params.tu_size; i >= 32; i--)
+ if (tegra_sor_compute_params(sor, &params, i))
+ break;
+
+ if (params.active_frac == 0) {
+ config->active_polarity = 0;
+ config->active_count = params.active_count;
+
+ if (!params.active_polarity)
+ config->active_count--;
+
+ config->tu_size = params.tu_size;
+ config->active_frac = 1;
+ } else {
+ config->active_polarity = params.active_polarity;
+ config->active_count = params.active_count;
+ config->active_frac = params.active_frac;
+ config->tu_size = params.tu_size;
+ }
+
+ dev_dbg(sor->dev,
+ "polarity: %d active count: %d tu size: %d active frac: %d\n",
+ config->active_polarity, config->active_count,
+ config->tu_size, config->active_frac);
+
+ watermark = params.ratio * config->tu_size * (f - params.ratio);
+ watermark = div_u64(watermark, f);
+
+ watermark = div_u64(watermark + params.error, f);
+ config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
+ num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
+ (link->num_lanes * 8);
+
+ if (config->watermark > 30) {
+ config->watermark = 30;
+ dev_err(sor->dev,
+ "unable to compute TU size, forcing watermark to %u\n",
+ config->watermark);
+ } else if (config->watermark > num_syms_per_line) {
+ config->watermark = num_syms_per_line;
+ dev_err(sor->dev, "watermark too high, forcing to %u\n",
+ config->watermark);
+ }
+
+ /* compute the number of symbols per horizontal blanking interval */
+ num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
+ config->hblank_symbols = div_u64(num, pclk);
+
+ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ config->hblank_symbols -= 3;
+
+ config->hblank_symbols -= 12 / link->num_lanes;
+
+ /* compute the number of symbols per vertical blanking interval */
+ num = (mode->hdisplay - 25) * link_rate;
+ config->vblank_symbols = div_u64(num, pclk);
+ config->vblank_symbols -= 36 / link->num_lanes + 4;
+
+ dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
+ config->vblank_symbols);
+
+ return 0;
+}
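To put rough numbers on the ratio that drives this computation (values assumed for illustration, not taken from the patch):
/*
 * Example: 1080p (pclk = 148.5 MHz) at 24 bpp over four lanes running
 * at 2.7 GHz (270 MHz symbol clock after 8b/10b coding):
 *
 *   input  = 148500000 * 24    = 3.564 Gbit/s
 *   output = 270000000 * 8 * 4 = 8.640 Gbit/s
 *   ratio  = input / output    ~ 0.4125
 *
 * i.e. on average ~26.4 of every 64 link symbols carry pixel data; the
 * active count/frac/polarity fields computed above encode exactly that
 * fractional fill of the transfer unit.
 */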
+
static int tegra_output_sor_enable(struct tegra_output *output)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct drm_display_mode *mode = &dc->base.mode;
unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct drm_dp_link link;
+ struct drm_dp_aux *aux;
unsigned long value;
- int err;
+ int err = 0;
+
+ mutex_lock(&sor->lock);
if (sor->enabled)
- return 0;
+ goto unlock;
err = clk_prepare_enable(sor->clk);
if (err < 0)
- return err;
+ goto unlock;
reset_control_deassert(sor->rst);
+ /* FIXME: properly convert to struct drm_dp_aux */
+ aux = (struct drm_dp_aux *)sor->dpaux;
+
if (sor->dpaux) {
err = tegra_dpaux_enable(sor->dpaux);
if (err < 0)
dev_err(sor->dev, "failed to enable DP: %d\n", err);
+
+ err = drm_dp_link_probe(aux, &link);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to probe eDP link: %d\n",
+ err);
+ return err;
+ }
}
err = clk_set_parent(sor->clk, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = 24; /* XXX: don't hardcode? */
+
+ err = tegra_sor_calc_config(sor, mode, &config, &link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute link configuration: %d\n",
+ err);
+
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
@@ -385,7 +600,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
if (err < 0) {
dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
- return err;
+ goto unlock;
}
usleep_range(5, 100);
@@ -419,15 +634,29 @@ static int tegra_output_sor_enable(struct tegra_output *output)
if (err < 0)
dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
- /* power dplanes (XXX parameterize based on link?) */
+ /* power DP lanes */
value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
- value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+
+ if (link.num_lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
+
+ if (link.num_lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD_1;
+ else
+ value |= SOR_DP_PADCTL_PD_TXD_1;
+
+ if (link.num_lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD_0;
+ else
+ value |= SOR_DP_PADCTL_PD_TXD_0;
+
tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(4);
+ value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
/* start lane sequencer */
@@ -443,10 +672,10 @@ static int tegra_output_sor_enable(struct tegra_output *output)
usleep_range(250, 1000);
}
- /* set link bandwidth (2.7 GHz, XXX: parameterize based on link?) */
+ /* set link bandwidth */
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+ value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* set linkctl */
@@ -454,7 +683,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
value |= SOR_DP_LINKCTL_ENABLE;
value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
- value |= SOR_DP_LINKCTL_TU_SIZE(59); /* XXX: don't hardcode? */
+ value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
@@ -470,28 +699,31 @@ static int tegra_output_sor_enable(struct tegra_output *output)
value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
- value |= SOR_DP_CONFIG_WATERMARK(14); /* XXX: don't hardcode? */
+ value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(47); /* XXX: don't hardcode? */
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(9); /* XXX: don't hardcode? */
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY; /* XXX: don't hardcode? */
+ if (config.active_polarity)
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+ else
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
- value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE; /* XXX: don't hardcode? */
+ value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
- value |= 137; /* XXX: don't hardcode? */
+ value |= config.hblank_symbols & 0xffff;
tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
- value |= 2368; /* XXX: don't hardcode? */
+ value |= config.vblank_symbols & 0xffff;
tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
/* enable pad calibration logic */
@@ -500,30 +732,27 @@ static int tegra_output_sor_enable(struct tegra_output *output)
tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
if (sor->dpaux) {
- /* FIXME: properly convert to struct drm_dp_aux */
- struct drm_dp_aux *aux = (struct drm_dp_aux *)sor->dpaux;
- struct drm_dp_link link;
u8 rate, lanes;
err = drm_dp_link_probe(aux, &link);
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- return err;
+ goto unlock;
}
err = drm_dp_link_power_up(aux, &link);
if (err < 0) {
dev_err(sor->dev, "failed to power up eDP link: %d\n",
err);
- return err;
+ goto unlock;
}
err = drm_dp_link_configure(aux, &link);
if (err < 0) {
dev_err(sor->dev, "failed to configure eDP link: %d\n",
err);
- return err;
+ goto unlock;
}
rate = drm_dp_link_rate_to_bw_code(link.rate);
@@ -558,7 +787,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
if (err < 0) {
dev_err(sor->dev, "DP fast link training failed: %d\n",
err);
- return err;
+ goto unlock;
}
dev_dbg(sor->dev, "fast link training succeeded\n");
@@ -567,7 +796,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
err = tegra_sor_power_up(sor, 250);
if (err < 0) {
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- return err;
+ goto unlock;
}
/* start display controller in continuous mode */
@@ -586,12 +815,26 @@ static int tegra_output_sor_enable(struct tegra_output *output)
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
* raster, associate with display controller)
*/
- value = SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 |
- SOR_STATE_ASY_VSYNCPOL |
+ value = SOR_STATE_ASY_VSYNCPOL |
SOR_STATE_ASY_HSYNCPOL |
SOR_STATE_ASY_PROTOCOL_DP_A |
SOR_STATE_ASY_CRC_MODE_COMPLETE |
SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ switch (config.bits_per_pixel) {
+ case 24:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 18:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
tegra_sor_writel(sor, value, SOR_STATE_1);
/*
@@ -620,11 +863,8 @@ static int tegra_output_sor_enable(struct tegra_output *output)
value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
- /* XXX interlaced mode */
- tegra_sor_writel(sor, 0x00000001, SOR_HEAD_STATE_5(0));
-
/* CSTM (LVDS, link A/B, upper) */
- value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_B | SOR_CSTM_LINK_ACT_B |
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
@@ -632,7 +872,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
err = tegra_sor_setup_pwm(sor, 250);
if (err < 0) {
dev_err(sor->dev, "failed to setup PWM: %d\n", err);
- return err;
+ goto unlock;
}
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
@@ -644,18 +884,20 @@ static int tegra_output_sor_enable(struct tegra_output *output)
err = tegra_sor_attach(sor);
if (err < 0) {
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
- return err;
+ goto unlock;
}
err = tegra_sor_wakeup(sor);
if (err < 0) {
dev_err(sor->dev, "failed to enable DC: %d\n", err);
- return err;
+ goto unlock;
}
sor->enabled = true;
- return 0;
+unlock:
+ mutex_unlock(&sor->lock);
+ return err;
}
static int tegra_sor_detach(struct tegra_sor *sor)
@@ -740,7 +982,7 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
/* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
@@ -783,15 +1025,17 @@ static int tegra_output_sor_disable(struct tegra_output *output)
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct tegra_sor *sor = to_sor(output);
unsigned long value;
- int err;
+ int err = 0;
+
+ mutex_lock(&sor->lock);
if (!sor->enabled)
- return 0;
+ goto unlock;
err = tegra_sor_detach(sor);
if (err < 0) {
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
- return err;
+ goto unlock;
}
tegra_sor_writel(sor, 0, SOR_STATE_1);
@@ -832,21 +1076,21 @@ static int tegra_output_sor_disable(struct tegra_output *output)
err = tegra_sor_power_down(sor);
if (err < 0) {
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
- return err;
+ goto unlock;
}
if (sor->dpaux) {
err = tegra_dpaux_disable(sor->dpaux);
if (err < 0) {
dev_err(sor->dev, "failed to disable DP: %d\n", err);
- return err;
+ goto unlock;
}
}
err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
if (err < 0) {
dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
- return err;
+ goto unlock;
}
reset_control_assert(sor->rst);
@@ -854,18 +1098,18 @@ static int tegra_output_sor_disable(struct tegra_output *output)
sor->enabled = false;
- return 0;
+unlock:
+ mutex_unlock(&sor->lock);
+ return err;
}
static int tegra_output_sor_setup_clock(struct tegra_output *output,
- struct clk *clk, unsigned long pclk)
+ struct clk *clk, unsigned long pclk,
+ unsigned int *div)
{
struct tegra_sor *sor = to_sor(output);
int err;
- /* round to next MHz */
- pclk = DIV_ROUND_UP(pclk / 2, 1000000) * 1000000;
-
err = clk_set_parent(clk, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
@@ -874,11 +1118,12 @@ static int tegra_output_sor_setup_clock(struct tegra_output *output,
err = clk_set_rate(sor->clk_parent, pclk);
if (err < 0) {
- dev_err(sor->dev, "failed to set base clock rate to %lu Hz\n",
- pclk * 2);
+ dev_err(sor->dev, "failed to set clock rate to %lu Hz\n", pclk);
return err;
}
+ *div = 0;
+
return 0;
}
@@ -914,9 +1159,124 @@ static const struct tegra_output_ops sor_ops = {
.detect = tegra_output_sor_detect,
};
+static int tegra_sor_crc_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static int tegra_sor_crc_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_CRC_A);
+ if (value & SOR_CRC_A_VALID)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
+ size_t size, loff_t *ppos)
+{
+ struct tegra_sor *sor = file->private_data;
+ ssize_t num, err;
+ char buf[10];
+ u32 value;
+
+ mutex_lock(&sor->lock);
+
+ if (!sor->enabled) {
+ err = -EAGAIN;
+ goto unlock;
+ }
+
+ value = tegra_sor_readl(sor, SOR_STATE_1);
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE_1);
+
+ value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
+ value |= SOR_CRC_CNTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_CRC_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_TEST);
+ value &= ~SOR_TEST_CRC_POST_SERIALIZE;
+ tegra_sor_writel(sor, value, SOR_TEST);
+
+ err = tegra_sor_crc_wait(sor, 100);
+ if (err < 0)
+ goto unlock;
+
+ tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
+ value = tegra_sor_readl(sor, SOR_CRC_B);
+
+ num = scnprintf(buf, sizeof(buf), "%08x\n", value);
+
+ err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+
+unlock:
+ mutex_unlock(&sor->lock);
+ return err;
+}
+
+static const struct file_operations tegra_sor_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_sor_crc_open,
+ .read = tegra_sor_crc_read,
+ .release = tegra_sor_crc_release,
+};
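The node itself is wired up in tegra_sor_debugfs_init() below; a userspace sketch (the DRM minor number and the CRC value are made up):
/*
 *   # cat /sys/kernel/debug/dri/0/sor/crc
 *   3d5f7a90
 *
 * A read enables the CRC logic, waits up to 100 ms for SOR_CRC_A_VALID,
 * then returns the value latched in SOR_CRC_B; -EAGAIN is returned
 * while the output is disabled.
 */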
+
+static int tegra_sor_debugfs_init(struct tegra_sor *sor,
+ struct drm_minor *minor)
+{
+ struct dentry *entry;
+ int err = 0;
+
+ sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+ if (!sor->debugfs)
+ return -ENOMEM;
+
+ entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
+ &tegra_sor_crc_fops);
+ if (!entry) {
+ dev_err(sor->dev,
+ "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
+ minor->debugfs_root->d_name.name);
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ return err;
+
+remove:
+ debugfs_remove(sor->debugfs);
+ sor->debugfs = NULL;
+ return err;
+}
+
+static int tegra_sor_debugfs_exit(struct tegra_sor *sor)
+{
+ debugfs_remove_recursive(sor->debugfs);
+ sor->debugfs = NULL;
+
+ return 0;
+}
+
static int tegra_sor_init(struct host1x_client *client)
{
- struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
@@ -928,12 +1288,18 @@ static int tegra_sor_init(struct host1x_client *client)
sor->output.dev = sor->dev;
sor->output.ops = &sor_ops;
- err = tegra_output_init(tegra->drm, &sor->output);
+ err = tegra_output_init(drm, &sor->output);
if (err < 0) {
dev_err(sor->dev, "output setup failed: %d\n", err);
return err;
}
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_sor_debugfs_init(sor, drm->primary);
+ if (err < 0)
+ dev_err(sor->dev, "debugfs setup failed: %d\n", err);
+ }
+
if (sor->dpaux) {
err = tegra_dpaux_attach(sor->dpaux, &sor->output);
if (err < 0) {
@@ -964,6 +1330,12 @@ static int tegra_sor_exit(struct host1x_client *client)
}
}
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_sor_debugfs_exit(sor);
+ if (err < 0)
+ dev_err(sor->dev, "debugfs cleanup failed: %d\n", err);
+ }
+
err = tegra_output_exit(&sor->output);
if (err < 0) {
dev_err(sor->dev, "output cleanup failed: %d\n", err);
@@ -1045,6 +1417,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
+ mutex_init(&sor->lock);
+
err = host1x_client_register(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f4156d54cd05..a5f8853fedb5 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -47,6 +47,7 @@
#define SOR_HEAD_STATE_4(x) (0x0d + (x))
#define SOR_HEAD_STATE_5(x) (0x0f + (x))
#define SOR_CRC_CNTRL 0x11
+#define SOR_CRC_CNTRL_ENABLE (1 << 0)
#define SOR_DP_DEBUG_MVID 0x12
#define SOR_CLK_CNTRL 0x13
@@ -69,6 +70,7 @@
#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
#define SOR_TEST 0x16
+#define SOR_TEST_CRC_POST_SERIALIZE (1 << 23)
#define SOR_TEST_ATTACHED (1 << 10)
#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
@@ -115,6 +117,8 @@
#define SOR_LVDS 0x1c
#define SOR_CRC_A 0x1d
+#define SOR_CRC_A_VALID (1 << 0)
+#define SOR_CRC_A_RESET (1 << 0)
#define SOR_CRC_B 0x1e
#define SOR_BLANK 0x1f
#define SOR_SEQ_CTL 0x20
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 171a8203892c..b20b69488dc9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -268,7 +268,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
}
pm_runtime_get_sync(dev->dev);
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index afdf383f630a..7094b92d1ec7 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -294,6 +294,7 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = udl;
if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
+ ret = -ENODEV;
DRM_ERROR("firmware not recognized. Assume incompatible device\n");
goto err;
}
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index a18479c6b6da..6fc0648dd37f 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -737,4 +737,4 @@ const struct drm_ioctl_desc via_ioctls[] = {
DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
-int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
+int via_max_ioctl = ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 927889105483..d70b1e1544bf 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
/* Linux specific until context tracking code gets ported to BSD */
/* Last context, perform cleanup */
- if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
+ if (list_is_singular(&dev->ctxlist)) {
DRM_DEBUG("Last Context\n");
drm_irq_uninstall(dev);
via_cleanup_futex(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index b71bcd0bfbbf..67720f70fe29 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,11 +1,14 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && FB
+ depends on DRM && PCI
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_TTM
+ # Only needed for the transitional use of drm_crtc_init - can be removed
+ # again once vmwgfx sets up the primary plane itself.
+ select DRM_KMS_HELPER
help
Choose this option if you would like to run 3D acceleration
in a VMware virtual machine.
@@ -14,7 +17,7 @@ config DRM_VMWGFX
The compiled module will be called "vmwgfx.ko".
config DRM_VMWGFX_FBCON
- depends on DRM_VMWGFX
+ depends on DRM_VMWGFX && FB
bool "Enable framebuffer console under vmwgfx by default"
help
Choose this option if you are shipping a new vmwgfx
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a223bbea3b3..246a62bab378 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -806,7 +806,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, dev->pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -1417,7 +1417,7 @@ static struct drm_driver driver = {
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
- .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
+ .num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_create = vmw_master_create,
.master_destroy = vmw_master_destroy,
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a89ad938eacf..b031b48dbb3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
- vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index a2dde5ad8138..8f3edc4710f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -187,7 +187,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
/* A lot of the code assumes this */
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
return ret;
}
@@ -273,7 +273,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
- mutex_unlock(&crtc->mutex);
+ drm_modeset_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
vmw_cursor_update_position(dev_priv, shown,
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_y + du->hotspot_y);
drm_modeset_unlock_all(dev_priv->dev);
- mutex_lock(&crtc->mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
return 0;
}
@@ -2001,7 +2001,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
if (du->pref_mode)
list_move(&du->pref_mode->head, &connector->probed_modes);
- drm_mode_connector_list_update(connector);
+ drm_mode_connector_list_update(connector, true);
return 1;
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index ccdd2e6da5e3..aaf54859adb0 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -216,8 +216,8 @@ int host1x_device_exit(struct host1x_device *device)
}
EXPORT_SYMBOL(host1x_device_exit);
-static int host1x_register_client(struct host1x *host1x,
- struct host1x_client *client)
+static int host1x_add_client(struct host1x *host1x,
+ struct host1x_client *client)
{
struct host1x_device *device;
struct host1x_subdev *subdev;
@@ -238,8 +238,8 @@ static int host1x_register_client(struct host1x *host1x,
return -ENODEV;
}
-static int host1x_unregister_client(struct host1x *host1x,
- struct host1x_client *client)
+static int host1x_del_client(struct host1x *host1x,
+ struct host1x_client *client)
{
struct host1x_device *device, *dt;
struct host1x_subdev *subdev;
@@ -503,7 +503,7 @@ int host1x_client_register(struct host1x_client *client)
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
- err = host1x_register_client(host1x, client);
+ err = host1x_add_client(host1x, client);
if (!err) {
mutex_unlock(&devices_lock);
return 0;
@@ -529,7 +529,7 @@ int host1x_client_unregister(struct host1x_client *client)
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
- err = host1x_unregister_client(host1x, client);
+ err = host1x_del_client(host1x, client);
if (!err) {
mutex_unlock(&devices_lock);
return 0;
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
new file mode 100644
index 000000000000..2f228a2f2a48
--- /dev/null
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -0,0 +1,7 @@
+config IMX_IPUV3_CORE
+ tristate "IPUv3 core support"
+ depends on SOC_IMX5 || SOC_IMX6Q || SOC_IMX6SL || ARCH_MULTIPLATFORM
+ depends on RESET_CONTROLLER
+ help
+ Choose this if you have an i.MX5/6 system and want to use the Image
+ Processing Unit. This option only enables IPU base support.
diff --git a/drivers/staging/imx-drm/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 28ed72e98a96..1887972b4ac2 100644
--- a/drivers/staging/imx-drm/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += imx-ipu-v3.o
+obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
-imx-ipu-v3-objs := ipu-common.o ipu-dc.o ipu-di.o ipu-dp.o ipu-dmfc.o
+imx-ipu-v3-objs := ipu-common.o ipu-dc.o ipu-di.o ipu-dp.o ipu-dmfc.o ipu-smfc.o
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index a1f7b2001c8a..04e7b2eafbdd 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -31,7 +31,7 @@
#include <drm/drm_fourcc.h>
-#include "imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
@@ -661,6 +661,39 @@ int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
}
EXPORT_SYMBOL_GPL(ipu_module_disable);
+int ipu_csi_enable(struct ipu_soc *ipu, int csi)
+{
+ return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_enable);
+
+int ipu_csi_disable(struct ipu_soc *ipu, int csi)
+{
+ return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_disable);
+
+int ipu_smfc_enable(struct ipu_soc *ipu)
+{
+ return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_enable);
+
+int ipu_smfc_disable(struct ipu_soc *ipu)
+{
+ return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_disable);
+
+int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+
+ return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
+
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
struct ipu_soc *ipu = channel->ipu;
@@ -896,8 +929,17 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
goto err_dp;
}
+ ret = ipu_smfc_init(ipu, dev, ipu_base +
+ devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
+ if (ret) {
+ unit = "smfc";
+ goto err_smfc;
+ }
+
return 0;
+err_smfc:
+ ipu_dp_exit(ipu);
err_dp:
ipu_dmfc_exit(ipu);
err_dmfc:
@@ -977,6 +1019,7 @@ EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
+ ipu_smfc_exit(ipu);
ipu_dp_exit(ipu);
ipu_dmfc_exit(ipu);
ipu_dc_exit(ipu);
@@ -1001,6 +1044,7 @@ static void platform_device_unregister_children(struct platform_device *pdev)
struct ipu_platform_reg {
struct ipu_client_platformdata pdata;
const char *name;
+ int reg_offset;
};
static const struct ipu_platform_reg client_reg[] = {
@@ -1022,13 +1066,29 @@ static const struct ipu_platform_reg client_reg[] = {
.dma[1] = -EINVAL,
},
.name = "imx-ipuv3-crtc",
+ }, {
+ .pdata = {
+ .csi = 0,
+ .dma[0] = IPUV3_CHANNEL_CSI0,
+ .dma[1] = -EINVAL,
+ },
+ .reg_offset = IPU_CM_CSI0_REG_OFS,
+ .name = "imx-ipuv3-camera",
+ }, {
+ .pdata = {
+ .csi = 1,
+ .dma[0] = IPUV3_CHANNEL_CSI1,
+ .dma[1] = -EINVAL,
+ },
+ .reg_offset = IPU_CM_CSI1_REG_OFS,
+ .name = "imx-ipuv3-camera",
},
};
static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;
-static int ipu_add_client_devices(struct ipu_soc *ipu)
+static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
struct device *dev = ipu->dev;
unsigned i;
@@ -1042,9 +1102,19 @@ static int ipu_add_client_devices(struct ipu_soc *ipu)
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
-
- pdev = platform_device_register_data(dev, reg->name,
- id++, &reg->pdata, sizeof(reg->pdata));
+ struct resource res;
+
+ if (reg->reg_offset) {
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
+ res.end = res.start + PAGE_SIZE - 1;
+ pdev = platform_device_register_resndata(dev, reg->name,
+ id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
+ } else {
+ pdev = platform_device_register_data(dev, reg->name,
+ id++, &reg->pdata, sizeof(reg->pdata));
+ }
if (IS_ERR(pdev))
goto err_register;
@@ -1241,7 +1311,7 @@ static int ipu_probe(struct platform_device *pdev)
if (ret)
goto failed_submodules_init;
- ret = ipu_add_client_devices(ipu);
+ ret = ipu_add_client_devices(ipu, ipu_base);
if (ret) {
dev_err(&pdev->dev, "adding client devices failed with %d\n",
ret);
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 784a4a13eac3..2326c752d89b 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -21,8 +21,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include "../imx-drm.h"
-#include "imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2)
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 849b3e120ef0..c490ba4384fc 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -20,7 +20,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
-#include "imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
struct ipu_di {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 59f182b28fc1..042c3958e2a0 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/io.h>
-#include "imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define DMFC_RD_CHAN 0x0000
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index d90f82a87d19..98686edbcdbb 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -19,7 +19,7 @@
#include <linux/io.h>
#include <linux/err.h>
-#include "imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"
#define DP_SYNC 0
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index bfc1b3366488..c93f50ec04f7 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -22,7 +22,7 @@ struct ipu_soc;
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include "imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#define IPUV3_CHANNEL_CSI0 0
#define IPUV3_CHANNEL_CSI1 1
@@ -151,6 +151,8 @@ struct ipuv3_channel {
struct ipu_dc_priv;
struct ipu_dmfc_priv;
struct ipu_di;
+struct ipu_smfc_priv;
+
struct ipu_devtype;
struct ipu_soc {
@@ -178,6 +180,7 @@ struct ipu_soc {
struct ipu_dp_priv *dp_priv;
struct ipu_dmfc_priv *dmfc_priv;
struct ipu_di *di_priv[2];
+ struct ipu_smfc_priv *smfc_priv;
};
void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
@@ -206,4 +209,7 @@ void ipu_dc_exit(struct ipu_soc *ipu);
int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
void ipu_cpmem_exit(struct ipu_soc *ipu);
+int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_smfc_exit(struct ipu_soc *ipu);
+
#endif /* __IPU_PRV_H__ */
diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c
new file mode 100644
index 000000000000..e4f85ad286fc
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#define DEBUG
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <video/imx-ipu-v3.h>
+
+#include "ipu-prv.h"
+
+struct ipu_smfc_priv {
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+/* SMFC Registers */
+#define SMFC_MAP 0x0000
+#define SMFC_WMC 0x0004
+#define SMFC_BS 0x0008
+
+int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize)
+{
+ struct ipu_smfc_priv *smfc = ipu->smfc_priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&smfc->lock, flags);
+
+ shift = channel * 4;
+ val = readl(smfc->base + SMFC_BS);
+ val &= ~(0xf << shift);
+ val |= burstsize << shift;
+ writel(val, smfc->base + SMFC_BS);
+
+ spin_unlock_irqrestore(&smfc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
+
+int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id)
+{
+ struct ipu_smfc_priv *smfc = ipu->smfc_priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&smfc->lock, flags);
+
+ shift = channel * 3;
+ val = readl(smfc->base + SMFC_MAP);
+ val &= ~(0x7 << shift);
+ val |= ((csi_id << 2) | mipi_id) << shift;
+ writel(val, smfc->base + SMFC_MAP);
+
+ spin_unlock_irqrestore(&smfc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
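A bit-packing example for the two helpers above (channel and CSI numbers chosen purely for illustration):
/*
 * ipu_smfc_map_channel(ipu, 2, 1, 0):
 *
 *   shift = 2 * 3        = 6
 *   field = (1 << 2) | 0 = 0x4
 *
 * so SMFC_MAP bits [8:6] become 100b; each channel owns a 3-bit map
 * field in SMFC_MAP and a 4-bit burst-size field in SMFC_BS.
 */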
+
+int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base)
+{
+ struct ipu_smfc_priv *smfc;
+
+ smfc = devm_kzalloc(dev, sizeof(*smfc), GFP_KERNEL);
+ if (!smfc)
+ return -ENOMEM;
+
+ ipu->smfc_priv = smfc;
+ spin_lock_init(&smfc->lock);
+
+ smfc->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!smfc->base)
+ return -ENOMEM;
+
+ pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, smfc->base);
+
+ return 0;
+}
+
+void ipu_smfc_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index ec0ae2d1686a..6866448083b2 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -623,7 +623,8 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
ret = dev->bus->pm->runtime_suspend(dev);
if (ret)
return ret;
-
+ if (vgasr_priv.handler->switchto)
+ vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
return 0;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 800c8b60f7a2..5e79c6ad914f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -810,7 +810,7 @@ config HID_ZYDACRON
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on HID
+ depends on HID && HAS_IOMEM
select MFD_CORE
default n
---help---
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6d00bb9366fa..48b66bbffc94 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9
#define USB_DEVICE_ID_ETURBOTOUCH 0x0006
+#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
@@ -715,6 +716,8 @@
#define USB_VENDOR_ID_PENMOUNT 0x14e1
#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
+#define USB_DEVICE_ID_PENMOUNT_1610 0x1610
+#define USB_DEVICE_ID_PENMOUNT_1640 0x1640
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 24883b4d1a49..cc2bd2022198 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -52,7 +52,7 @@ static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
-static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store);
+static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IROTH, lg4ff_range_show, lg4ff_range_store);
struct lg4ff_device_entry {
__u32 product_id;
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index c930ab8554ea..7f965e231433 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -501,7 +501,7 @@ static ssize_t picolcd_fb_update_rate_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+static DEVICE_ATTR(fb_update_rate, 0664, picolcd_fb_update_rate_show,
picolcd_fb_update_rate_store);
/* initialize Framebuffer device */
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 2451c7e5febd..578bbe65902b 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev,
return 0;
}
+#ifdef CONFIG_PM
static int rmi_post_reset(struct hid_device *hdev)
{
return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
@@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev)
{
return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
}
+#endif /* CONFIG_PM */
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x0100
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a8d5c8faf8cf..e244e449cbba 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
{
struct hid_sensor_hub_callbacks_list *callback;
struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+ unsigned long flags;
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
if (callback->usage_id == usage_id &&
callback->hsdev == hsdev) {
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return -EINVAL;
}
callback = kzalloc(sizeof(*callback), GFP_ATOMIC);
if (!callback) {
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return -ENOMEM;
}
callback->hsdev = hsdev;
@@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
callback->usage_id = usage_id;
callback->priv = NULL;
list_add_tail(&callback->list, &pdata->dyn_callback_list);
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
{
struct hid_sensor_hub_callbacks_list *callback;
struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+ unsigned long flags;
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list)
if (callback->usage_id == usage_id &&
callback->hsdev == hsdev) {
@@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
kfree(callback);
break;
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
{
struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
struct hid_sensor_hub_callbacks_list *callback;
+ unsigned long flags;
hid_dbg(hdev, " sensor_hub_suspend\n");
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
if (callback->usage_callback->suspend)
callback->usage_callback->suspend(
callback->hsdev, callback->priv);
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev)
{
struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
struct hid_sensor_hub_callbacks_list *callback;
+ unsigned long flags;
hid_dbg(hdev, " sensor_hub_resume\n");
- spin_lock(&pdata->dyn_callback_lock);
+ spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
if (callback->usage_callback->resume)
callback->usage_callback->resume(
callback->hsdev, callback->priv);
}
- spin_unlock(&pdata->dyn_callback_lock);
+ spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
return 0;
}
@@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
if (name == NULL) {
hid_err(hdev, "Failed MFD device name\n");
ret = -ENOMEM;
+ kfree(hsdev);
goto err_no_mem;
}
sd->hid_sensor_hub_client_devs[
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 59badc10a08c..31e6727cd009 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -49,6 +49,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
@@ -76,6 +77,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
index 71b9f9ab86e4..bc60dec3f586 100644
--- a/drivers/hsi/clients/Kconfig
+++ b/drivers/hsi/clients/Kconfig
@@ -15,7 +15,7 @@ config NOKIA_MODEM
config SSI_PROTOCOL
tristate "SSI protocol"
- depends on HSI && PHONET && (OMAP_SSI=y || OMAP_SSI=m)
+ depends on HSI && PHONET && OMAP_SSI
help
If you say Y here, you will enable the SSI protocol aka McSAAB.
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index b8693f0b27fe..29aea0b93360 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1116,8 +1116,7 @@ static int __init ssi_port_probe(struct platform_device *pd)
dev_dbg(&pd->dev, "init ssi port...\n");
- err = ref_module(THIS_MODULE, ssi->owner);
- if (err) {
- dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n",
- err);
+ if (!try_module_get(ssi->owner)) {
+ dev_err(&pd->dev, "could not increment parent module refcount\n");
 return -ENODEV;
@@ -1254,6 +1253,7 @@ static int __exit ssi_port_remove(struct platform_device *pd)
omap_ssi->port[omap_port->port_id] = NULL;
platform_set_drvdata(pd, NULL);
+ module_put(ssi->owner);
pm_runtime_disable(&pd->dev);
return 0;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6c8b032cacba..ed9350d42764 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -404,7 +404,7 @@ static u32 next_vp;
* performance critical channels (IDE, SCSI and Network) will be uniformly
* distributed across all available CPUs.
*/
-static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
{
u32 cur_cpu;
int i;
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index e84f4526eb36..ae22e3c1fc4c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -339,9 +339,13 @@ static void process_chn_event(u32 relid)
*/
do {
- hv_begin_read(&channel->inbound);
+ if (read_state)
+ hv_begin_read(&channel->inbound);
channel->onchannel_callback(arg);
- bytes_to_read = hv_end_read(&channel->inbound);
+ if (read_state)
+ bytes_to_read = hv_end_read(&channel->inbound);
+ else
+ bytes_to_read = 0;
} while (read_state && (bytes_to_read != 0));
} else {
pr_err("no channel callback for relid - %u\n", relid);
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ea852537307e..521c14625b3a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
kvp_respond_to_host(NULL, HV_E_FAIL);
}
+static void poll_channel(struct vmbus_channel *channel)
+{
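+ /* run the callback on the CPU the channel is bound to */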
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu,
+ hv_kvp_onchannelcallback,
+ channel, true);
+ else
+ hv_kvp_onchannelcallback(channel);
+}
+
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
int ret = 1;
@@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
kvp_register(dm_reg_value);
kvp_transaction.active = false;
if (kvp_transaction.kvp_context)
- hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
+ poll_channel(kvp_transaction.kvp_context);
}
return ret;
}
@@ -568,7 +579,7 @@ response_done:
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
-
+ poll_channel(channel);
}
/*
@@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
return;
}
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index dd761806f0e8..3b9c9ef0deb8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
- srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+ srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
if (srv->util_init) {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 18d1a8404cbc..22b750749a39 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -649,9 +649,9 @@ extern struct vmbus_connection vmbus_connection;
/* General vmbus interface */
-struct hv_device *vmbus_device_create(uuid_le *type,
- uuid_le *instance,
- struct vmbus_channel *channel);
+struct hv_device *vmbus_device_create(const uuid_le *type,
+ const uuid_le *instance,
+ struct vmbus_channel *channel);
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e53a3c2607e..4d6b26979fbd 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -435,7 +435,7 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
return ret;
}
-static uuid_le null_guid;
+static const uuid_le null_guid;
static inline bool is_null_guid(const __u8 *guid)
{
@@ -450,7 +450,7 @@ static inline bool is_null_guid(const __u8 *guid)
*/
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
const struct hv_vmbus_device_id *id,
- __u8 *guid)
+ const __u8 *guid)
{
for (; !is_null_guid(id->guid); id++)
if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
@@ -779,9 +779,9 @@ EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
-struct hv_device *vmbus_device_create(uuid_le *type,
- uuid_le *instance,
- struct vmbus_channel *channel)
+struct hv_device *vmbus_device_create(const uuid_le *type,
+ const uuid_le *instance,
+ struct vmbus_channel *channel)
{
struct hv_device *child_device_obj;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 00343166feb1..02d3d85829f3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1052,7 +1052,7 @@ config SENSORS_PC87427
will be called pc87427.
config SENSORS_NTC_THERMISTOR
- tristate "NTC thermistor support"
+ tristate "NTC thermistor support from Murata"
depends on !OF || IIO=n || IIO
help
This driver supports NTC thermistors sensor reading and its
@@ -1060,7 +1060,8 @@ config SENSORS_NTC_THERMISTOR
send notifications about the temperature.
Currently, this driver supports
- NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
+ NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333
+ from Murata.
This driver can also be built as a module. If so, the module
will be called ntc-thermistor.
@@ -1124,6 +1125,16 @@ config SENSORS_SHT21
This driver can also be built as a module. If so, the module
will be called sht21.
+config SENSORS_SHTC1
+ tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
+ depends on I2C
+ help
+ If you say yes here you get support for the Sensirion SHTC1 and SHTW1
+ humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called shtc1.
+
config SENSORS_S3C
tristate "Samsung built-in ADC"
depends on S3C_ADC
@@ -1166,6 +1177,7 @@ config SENSORS_DME1737
config SENSORS_EMC1403
tristate "SMSC EMC1403/23 thermal sensor"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the SMSC EMC1403/23
temperature monitoring chip.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 11798ad7e801..3dc0f02f71d2 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
+obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMM665) += smm665.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 5ffd81f19d01..0625e50d7a6e 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev,
return sprintf(buf, "%u\n", !!(alarms & mask));
}
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
+ adc128_show_in, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 0, 1);
static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 0, 2);
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO,
+ adc128_show_in, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 1, 1);
static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 1, 2);
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO,
+ adc128_show_in, NULL, 2, 0);
static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 2, 1);
static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 2, 2);
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO,
+ adc128_show_in, NULL, 3, 0);
static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 3, 1);
static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 3, 2);
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO,
+ adc128_show_in, NULL, 4, 0);
static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 4, 1);
static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 4, 2);
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO,
+ adc128_show_in, NULL, 5, 0);
static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 5, 1);
static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 5, 2);
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO,
- adc128_show_in, adc128_set_in, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO,
+ adc128_show_in, NULL, 6, 0);
static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 6, 1);
static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 3eb4281689b5..d74241bb278c 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adm1021_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long temp;
- int err;
+ int reg_val, err;
err = kstrtol(buf, 10, &temp);
if (err)
@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_max[index] = clamp_val(temp, -128, 127);
+ reg_val = clamp_val(temp, -128, 127);
+ data->temp_max[index] = reg_val * 1000;
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
- data->temp_max[index]);
+ reg_val);
mutex_unlock(&data->update_lock);
return count;
@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adm1021_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long temp;
- int err;
+ int reg_val, err;
err = kstrtol(buf, 10, &temp);
if (err)
@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_min[index] = clamp_val(temp, -128, 127);
+ reg_val = clamp_val(temp, -128, 127);
+ data->temp_min[index] = reg_val * 1000;
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
- data->temp_min[index]);
+ reg_val);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 78339e880bd6..2804571b269e 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
/* Update the value */
reg = (reg & 0x3F) | (val << 6);
+ /* Update the cache */
+ data->fan_div[attr->index] = reg;
+
/* Write value */
i2c_smbus_write_byte_data(client,
ADM1029_REG_FAN_DIV[attr->index], reg);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index a8a540ca8c34..51c1a5a165ab 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ val = clamp_val(val, 0, 127000);
mutex_lock(&data->update_lock);
data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ val = clamp_val(val, 0, 127000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
data->pwm[nr]);
@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, 127000);
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, 127000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, 127000);
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index eea817296513..9f2be3dd28f3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
get_temp_alarm, NULL, IDX_TEMP1_MAX);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
get_temp_alarm, NULL, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
get_temp, NULL, IDX_TEMP2_INPUT);
static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
set_temp, IDX_TEMP2_MIN);
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 6edce42c61d5..2ae8a304b5ef 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -45,30 +45,6 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
-static int atxp1_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int atxp1_remove(struct i2c_client *client);
-static struct atxp1_data *atxp1_update_device(struct device *dev);
-static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
-
-static const struct i2c_device_id atxp1_id[] = {
- { "atxp1", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, atxp1_id);
-
-static struct i2c_driver atxp1_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "atxp1",
- },
- .probe = atxp1_probe,
- .remove = atxp1_remove,
- .id_table = atxp1_id,
- .detect = atxp1_detect,
- .address_list = normal_i2c,
-};
-
struct atxp1_data {
struct device *hwmon_dev;
struct mutex update_lock;
@@ -386,4 +362,22 @@ static int atxp1_remove(struct i2c_client *client)
return 0;
};
+static const struct i2c_device_id atxp1_id[] = {
+ { "atxp1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atxp1_id);
+
+static struct i2c_driver atxp1_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "atxp1",
+ },
+ .probe = atxp1_probe,
+ .remove = atxp1_remove,
+ .id_table = atxp1_id,
+ .detect = atxp1_detect,
+ .address_list = normal_i2c,
+};
+
module_i2c_driver(atxp1_driver);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index fd892dd48e4c..78002de46cb6 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = DIV_ROUND_CLOSEST(val, 1000);
- if ((val < -63) || (val > 127))
- return -EINVAL;
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = DIV_ROUND_CLOSEST(val, 1000);
- if ((val < -63) || (val > 127))
- return -EINVAL;
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
- long rpm_target;
+ unsigned long rpm_target;
- int result = kstrtol(buf, 10, &rpm_target);
+ int result = kstrtoul(buf, 10, &rpm_target);
if (result < 0)
return result;
/* Datasheet states 16384 as maximum RPM target (table 3.2) */
- if ((rpm_target < 0) || (rpm_target > 16384))
- return -EINVAL;
+ rpm_target = clamp_val(rpm_target, 0, 16384);
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index ba35e4d530b5..2566c43dd1e9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -538,7 +538,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
/* Make this driver part of hwmon class. */
fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
- "gpio-fan", fan_data,
+ "gpio_fan", fan_data,
gpio_fan_groups);
if (IS_ERR(fan_data->hwmon_dev))
return PTR_ERR(fan_data->hwmon_dev);
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 93d26e8af3e2..bfd3f3eeabcd 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -148,7 +148,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
switch (reg) {
case INA2XX_SHUNT_VOLTAGE:
- val = DIV_ROUND_CLOSEST(data->regs[reg],
+ /* signed register */
+ val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
data->config->shunt_div);
break;
case INA2XX_BUS_VOLTAGE:
@@ -160,8 +161,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
val = data->regs[reg] * data->config->power_lsb;
break;
case INA2XX_CURRENT:
- /* LSB=1mA (selected). Is in mA */
- val = data->regs[reg];
+ /* signed register, LSB=1mA (selected), in mA */
+ val = (s16)data->regs[reg];
break;
default:
/* programmer goofed */
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index bed4af358308..b0129a54e1a6 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
* Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
* Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com>
- * Copyright (C) 2007--2009 Jean Delvare <jdelvare@suse.de>
+ * Copyright (C) 2007--2014 Jean Delvare <jdelvare@suse.de>
*
* Chip details at <http://www.national.com/ds/LM/LM85.pdf>
*
@@ -39,7 +39,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
- any_chip, lm85b, lm85c,
+ lm85,
adm1027, adt7463, adt7468,
emc6d100, emc6d102, emc6d103, emc6d103s
};
@@ -75,9 +75,6 @@ enum chips {
#define LM85_COMPANY_NATIONAL 0x01
#define LM85_COMPANY_ANALOG_DEV 0x41
#define LM85_COMPANY_SMSC 0x5c
-#define LM85_VERSTEP_VMASK 0xf0
-#define LM85_VERSTEP_GENERIC 0x60
-#define LM85_VERSTEP_GENERIC2 0x70
#define LM85_VERSTEP_LM85C 0x60
#define LM85_VERSTEP_LM85B 0x62
#define LM85_VERSTEP_LM96000_1 0x68
@@ -351,9 +348,9 @@ static const struct i2c_device_id lm85_id[] = {
{ "adm1027", adm1027 },
{ "adt7463", adt7463 },
{ "adt7468", adt7468 },
- { "lm85", any_chip },
- { "lm85b", lm85b },
- { "lm85c", lm85c },
+ { "lm85", lm85 },
+ { "lm85b", lm85 },
+ { "lm85c", lm85 },
{ "emc6d100", emc6d100 },
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
@@ -1281,7 +1278,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int address = client->addr;
- const char *type_name;
+ const char *type_name = NULL;
int company, verstep;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1297,16 +1294,6 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
"Detecting device at 0x%02x with COMPANY: 0x%02x and VERSTEP: 0x%02x\n",
address, company, verstep);
- /* All supported chips have the version in common */
- if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC &&
- (verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC2) {
- dev_dbg(&adapter->dev,
- "Autodetection failed: unsupported version\n");
- return -ENODEV;
- }
- type_name = "lm85";
-
- /* Now, refine the detection */
if (company == LM85_COMPANY_NATIONAL) {
switch (verstep) {
case LM85_VERSTEP_LM85C:
@@ -1323,6 +1310,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
"Found Winbond WPCD377I, ignoring\n");
return -ENODEV;
}
+ type_name = "lm85";
break;
}
} else if (company == LM85_COMPANY_ANALOG_DEV) {
@@ -1357,12 +1345,11 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
type_name = "emc6d103s";
break;
}
- } else {
- dev_dbg(&adapter->dev,
- "Autodetection failed: unknown vendor\n");
- return -ENODEV;
}
+ if (!type_name)
+ return -ENODEV;
+
strlcpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index af81be1237c9..c86a18402496 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -47,7 +47,7 @@
#define LTC4151_ADIN_L 0x05
struct ltc4151_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -59,8 +59,8 @@ struct ltc4151_data {
static struct ltc4151_data *ltc4151_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4151_data *data = i2c_get_clientdata(client);
+ struct ltc4151_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct ltc4151_data *ret = data;
mutex_lock(&data->update_lock);
@@ -159,7 +159,7 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4151_show_value, NULL,
* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
-static struct attribute *ltc4151_attributes[] = {
+static struct attribute *ltc4151_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
@@ -167,54 +167,30 @@ static struct attribute *ltc4151_attributes[] = {
NULL,
};
-
-static const struct attribute_group ltc4151_group = {
- .attrs = ltc4151_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4151);
static int ltc4151_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
struct ltc4151_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ltc4151_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
-
- return 0;
-
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
- return ret;
-}
-
-static int ltc4151_remove(struct i2c_client *client)
-{
- struct ltc4151_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ ltc4151_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4151_id[] = {
@@ -229,7 +205,6 @@ static struct i2c_driver ltc4151_driver = {
.name = "ltc4151",
},
.probe = ltc4151_probe,
- .remove = ltc4151_remove,
.id_table = ltc4151_id,
};
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e76feb86a1d4..ae66f42c4d6d 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -163,6 +163,18 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
static const struct of_device_id ntc_match[] = {
+ { .compatible = "murata,ncp15wb473",
+ .data = &ntc_thermistor_id[0] },
+ { .compatible = "murata,ncp18wb473",
+ .data = &ntc_thermistor_id[1] },
+ { .compatible = "murata,ncp21wb473",
+ .data = &ntc_thermistor_id[2] },
+ { .compatible = "murata,ncp03wb473",
+ .data = &ntc_thermistor_id[3] },
+ { .compatible = "murata,ncp15wl333",
+ .data = &ntc_thermistor_id[4] },
+
+ /* Usage of vendor name "ntc" is deprecated */
{ .compatible = "ntc,ncp15wb473",
.data = &ntc_thermistor_id[0] },
{ .compatible = "ntc,ncp18wb473",
@@ -500,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
- pdev->name);
+ pdev_id->name);
return 0;
err_after_sysfs:
@@ -534,7 +546,7 @@ static struct platform_driver ntc_thermistor_driver = {
module_platform_driver(ntc_thermistor_driver);
-MODULE_DESCRIPTION("NTC Thermistor Driver");
+MODULE_DESCRIPTION("NTC Thermistor Driver from Murata");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ntc-thermistor");
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
new file mode 100644
index 000000000000..decd7df995ab
--- /dev/null
+++ b/drivers/hwmon/shtc1.c
@@ -0,0 +1,251 @@
+/* Sensirion SHTC1 humidity and temperature sensor driver
+ *
+ * Copyright (C) 2014 Sensirion AG, Switzerland
+ * Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_data/shtc1.h>
+
+/* commands (high precision mode) */
+static const unsigned char shtc1_cmd_measure_blocking_hpm[] = { 0x7C, 0xA2 };
+static const unsigned char shtc1_cmd_measure_nonblocking_hpm[] = { 0x78, 0x66 };
+
+/* commands (low precision mode) */
+static const unsigned char shtc1_cmd_measure_blocking_lpm[] = { 0x64, 0x58 };
+static const unsigned char shtc1_cmd_measure_nonblocking_lpm[] = { 0x60, 0x9c };
+
+/* command for reading the ID register */
+static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
+
+/* constants for reading the ID register */
+#define SHTC1_ID 0x07
+#define SHTC1_ID_REG_MASK 0x1f
+
+/* delays for non-blocking i2c commands, both in us */
+#define SHTC1_NONBLOCKING_WAIT_TIME_HPM 14400
+#define SHTC1_NONBLOCKING_WAIT_TIME_LPM 1000
+
+#define SHTC1_CMD_LENGTH 2
+#define SHTC1_RESPONSE_LENGTH 6
+
+struct shtc1_data {
+ struct i2c_client *client;
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ const unsigned char *command;
+ unsigned int nonblocking_wait_time; /* in us */
+
+ struct shtc1_platform_data setup;
+
+ int temperature; /* 1000 * temperature in deg C */
+ int humidity; /* 1000 * relative humidity in %RH */
+};
+
+static int shtc1_update_values(struct i2c_client *client,
+ struct shtc1_data *data,
+ char *buf, int bufsize)
+{
+ int ret = i2c_master_send(client, data->command, SHTC1_CMD_LENGTH);
+ if (ret != SHTC1_CMD_LENGTH) {
+ dev_err(&client->dev, "failed to send command: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ /*
+ * In blocking mode (clock stretching mode) the I2C bus
+ * is blocked for other traffic, thus the call to i2c_master_recv()
+ * will wait until the data is ready. For non blocking mode, we
+ * have to wait ourselves.
+ */
+ if (!data->setup.blocking_io)
+ usleep_range(data->nonblocking_wait_time,
+ data->nonblocking_wait_time + 1000);
+
+ ret = i2c_master_recv(client, buf, bufsize);
+ if (ret != bufsize) {
+ dev_err(&client->dev, "failed to read values: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+/* sysfs attributes */
+static struct shtc1_data *shtc1_update_client(struct device *dev)
+{
+ struct shtc1_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned char buf[SHTC1_RESPONSE_LENGTH];
+ int val;
+ int ret = 0;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ / 10) || !data->valid) {
+ ret = shtc1_update_values(client, data, buf, sizeof(buf));
+ if (ret)
+ goto out;
+
+ /*
+ * From datasheet:
+ * T = -45 + 175 * ST / 2^16
+ * RH = 100 * SRH / 2^16
+ *
+ * Adapted for integer fixed point (3 digit) arithmetic.
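+ * (21875 / 2^13 == 175000 / 2^16; 12500 / 2^13 == 100000 / 2^16)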
+ */
+ val = be16_to_cpup((__be16 *)buf);
+ data->temperature = ((21875 * val) >> 13) - 45000;
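+ /* humidity word is at offset 3, after the temperature bytes and their CRC */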
+ val = be16_to_cpup((__be16 *)(buf + 3));
+ data->humidity = ((12500 * val) >> 13);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+out:
+ mutex_unlock(&data->update_lock);
+
+ return ret == 0 ? data : ERR_PTR(ret);
+}
+
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct shtc1_data *data = shtc1_update_client(dev);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temperature);
+}
+
+static ssize_t humidity1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct shtc1_data *data = shtc1_update_client(dev);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->humidity);
+}
+
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RO(humidity1_input);
+
+static struct attribute *shtc1_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &dev_attr_humidity1_input.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(shtc1);
+
+static void shtc1_select_command(struct shtc1_data *data)
+{
+ if (data->setup.high_precision) {
+ data->command = data->setup.blocking_io ?
+ shtc1_cmd_measure_blocking_hpm :
+ shtc1_cmd_measure_nonblocking_hpm;
+ data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_HPM;
+
+ } else {
+ data->command = data->setup.blocking_io ?
+ shtc1_cmd_measure_blocking_lpm :
+ shtc1_cmd_measure_nonblocking_lpm;
+ data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_LPM;
+ }
+}
+
+static int shtc1_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ char id_reg[2];
+ struct shtc1_data *data;
+ struct device *hwmon_dev;
+ struct i2c_adapter *adap = client->adapter;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(adap, I2C_FUNC_I2C)) {
+ dev_err(dev, "plain i2c transactions not supported\n");
+ return -ENODEV;
+ }
+
+ ret = i2c_master_send(client, shtc1_cmd_read_id_reg, SHTC1_CMD_LENGTH);
+ if (ret != SHTC1_CMD_LENGTH) {
+ dev_err(dev, "could not send read_id_reg command: %d\n", ret);
+ return ret < 0 ? ret : -ENODEV;
+ }
+ ret = i2c_master_recv(client, id_reg, sizeof(id_reg));
+ if (ret != sizeof(id_reg)) {
+ dev_err(dev, "could not read ID register: %d\n", ret);
+ return -ENODEV;
+ }
+ if ((id_reg[1] & SHTC1_ID_REG_MASK) != SHTC1_ID) {
+ dev_err(dev, "ID register doesn't match\n");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->setup.blocking_io = false;
+ data->setup.high_precision = true;
+ data->client = client;
+
+ if (client->dev.platform_data)
+ data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+ shtc1_select_command(data);
+ mutex_init(&data->update_lock);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name,
+ data,
+ shtc1_groups);
+ if (IS_ERR(hwmon_dev))
+ dev_dbg(dev, "unable to register hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+/* device ID table */
+static const struct i2c_device_id shtc1_id[] = {
+ { "shtc1", 0 },
+ { "shtw1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, shtc1_id);
+
+static struct i2c_driver shtc1_i2c_driver = {
+ .driver.name = "shtc1",
+ .probe = shtc1_probe,
+ .id_table = shtc1_id,
+};
+
+module_i2c_driver(shtc1_i2c_driver);
+
+MODULE_AUTHOR("Johannes Winkelmann <johannes.winkelmann@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SHTC1 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index 611f34c7333d..c53619086f33 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -27,17 +27,8 @@
struct vexpress_hwmon_data {
struct device *hwmon_dev;
struct regmap *reg;
- const char *name;
};
-static ssize_t vexpress_hwmon_name_show(struct device *dev,
- struct device_attribute *dev_attr, char *buffer)
-{
- struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
-
- return sprintf(buffer, "%s\n", data->name);
-}
-
static ssize_t vexpress_hwmon_label_show(struct device *dev,
struct device_attribute *dev_attr, char *buffer)
{
@@ -95,16 +86,6 @@ static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
-
-#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \
-struct attribute *vexpress_hwmon_attrs_##_name[] = { \
- &dev_attr_name.attr, \
- &dev_attr_##_label_attr.attr, \
- &sensor_dev_attr_##_input_attr.dev_attr.attr, \
- NULL \
-}
-
struct vexpress_hwmon_type {
const char *name;
const struct attribute_group **attr_groups;
@@ -114,7 +95,11 @@ struct vexpress_hwmon_type {
static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1000);
-static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
+static struct attribute *vexpress_hwmon_attrs_volt[] = {
+ &dev_attr_in1_label.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ NULL
+};
static struct attribute_group vexpress_hwmon_group_volt = {
.is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_volt,
@@ -131,7 +116,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_volt = {
static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1000);
-static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
+static struct attribute *vexpress_hwmon_attrs_amp[] = {
+ &dev_attr_curr1_label.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ NULL
+};
static struct attribute_group vexpress_hwmon_group_amp = {
.is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_amp,
@@ -147,7 +136,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_amp = {
static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1000);
-static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
+static struct attribute *vexpress_hwmon_attrs_temp[] = {
+ &dev_attr_temp1_label.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
static struct attribute_group vexpress_hwmon_group_temp = {
.is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_temp,
@@ -163,7 +156,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_temp = {
static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1);
-static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
+static struct attribute *vexpress_hwmon_attrs_power[] = {
+ &dev_attr_power1_label.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL
+};
static struct attribute_group vexpress_hwmon_group_power = {
.is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_power,
@@ -179,7 +176,11 @@ static struct vexpress_hwmon_type vexpress_hwmon_power = {
static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
NULL, 1);
-static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
+static struct attribute *vexpress_hwmon_attrs_energy[] = {
+ &dev_attr_energy1_label.attr,
+ &sensor_dev_attr_energy1_input.dev_attr.attr,
+ NULL
+};
static struct attribute_group vexpress_hwmon_group_energy = {
.is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_energy,
@@ -218,7 +219,6 @@ MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match);
static int vexpress_hwmon_probe(struct platform_device *pdev)
{
- int err;
const struct of_device_id *match;
struct vexpress_hwmon_data *data;
const struct vexpress_hwmon_type *type;
@@ -232,45 +232,19 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
if (!match)
return -ENODEV;
type = match->data;
- data->name = type->name;
data->reg = devm_regmap_init_vexpress_config(&pdev->dev);
if (IS_ERR(data->reg))
return PTR_ERR(data->reg);
- err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups);
- if (err)
- goto error;
-
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error;
- }
-
- return 0;
-
-error:
- sysfs_remove_group(&pdev->dev.kobj, match->data);
- return err;
-}
-
-static int vexpress_hwmon_remove(struct platform_device *pdev)
-{
- struct vexpress_hwmon_data *data = platform_get_drvdata(pdev);
- const struct of_device_id *match;
-
- hwmon_device_unregister(data->hwmon_dev);
-
- match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
- sysfs_remove_group(&pdev->dev.kobj, match->data);
+ data->hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ type->name, data, type->attr_groups);
- return 0;
+ return PTR_ERR_OR_ZERO(data->hwmon_dev);
}
static struct platform_driver vexpress_hwmon_driver = {
.probe = vexpress_hwmon_probe,
- .remove = vexpress_hwmon_remove,
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 6ed76ceb9270..32487c19cbfc 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -249,7 +249,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
int nr = to_sensor_dev_attr(attr)->index; \
struct w83l786ng_data *data = w83l786ng_update_device(dev); \
return sprintf(buf, "%d\n", \
- FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); \
+ FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan_reg(fan);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 620d1004a1e7..9f7d5859cf65 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -676,6 +676,16 @@ config I2C_RIIC
This driver can also be built as a module. If so, the module
will be called i2c-riic.
+config I2C_RK3X
+ tristate "Rockchip RK3xxx I2C adapter"
+ depends on OF
+ help
+ Say Y here to include support for the I2C adapter in Rockchip RK3xxx
+ SoCs.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-rk3x.
+
config HAVE_S3C2410_I2C
bool
help
@@ -764,6 +774,19 @@ config I2C_STU300
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_SUN6I_P2WI
+ tristate "Allwinner sun6i internal P2WI controller"
+ depends on RESET_CONTROLLER
+ depends on MACH_SUN6I || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ P2WI (Push/Pull 2 Wire Interface) controller embedded in some sunxi
+ SoCs.
+ The P2WI looks like an SMBus controller (which supports only byte
+ accesses), except that it only supports one slave device.
+ This interface is used to connect to specific PMIC devices (like the
+ AXP221).
+
config I2C_TEGRA
tristate "NVIDIA Tegra internal I2C controller"
depends on ARCH_TEGRA
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 298692cc6000..dd9a7f8e873f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
+obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o
obj-$(CONFIG_I2C_ST) += i2c-st.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
new file mode 100644
index 000000000000..a9791509966a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -0,0 +1,763 @@
+/*
+ * Driver for I2C adapter in Rockchip RK3xxx SoC
+ *
+ * Max Schwarz <max.schwarz@online.de>
+ * based on the patches by Rockchip Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+
+/* Register Map */
+#define REG_CON 0x00 /* control register */
+#define REG_CLKDIV 0x04 /* clock divisor register */
+#define REG_MRXADDR 0x08 /* slave address for REGISTER_TX */
+#define REG_MRXRADDR 0x0c /* slave register address for REGISTER_TX */
+#define REG_MTXCNT 0x10 /* number of bytes to be transmitted */
+#define REG_MRXCNT 0x14 /* number of bytes to be received */
+#define REG_IEN 0x18 /* interrupt enable */
+#define REG_IPD 0x1c /* interrupt pending */
+#define REG_FCNT 0x20 /* finished count */
+
+/* Data buffer offsets */
+#define TXBUFFER_BASE 0x100
+#define RXBUFFER_BASE 0x200
+
+/* REG_CON bits */
+#define REG_CON_EN BIT(0)
+enum {
+ REG_CON_MOD_TX = 0, /* transmit data */
+ REG_CON_MOD_REGISTER_TX, /* select register and restart */
+ REG_CON_MOD_RX, /* receive data */
+ REG_CON_MOD_REGISTER_RX, /* broken: transmits read addr AND writes
+ * register addr */
+};
+#define REG_CON_MOD(mod) ((mod) << 1)
+#define REG_CON_MOD_MASK (BIT(1) | BIT(2))
+#define REG_CON_START BIT(3)
+#define REG_CON_STOP BIT(4)
+#define REG_CON_LASTACK BIT(5) /* 1: send NACK after last received byte */
+#define REG_CON_ACTACK BIT(6) /* 1: stop if NACK is received */
+
+/* REG_MRXADDR bits */
+#define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
+
+/* REG_IEN/REG_IPD bits */
+#define REG_INT_BTF BIT(0) /* a byte was transmitted */
+#define REG_INT_BRF BIT(1) /* a byte was received */
+#define REG_INT_MBTF BIT(2) /* master data transmit finished */
+#define REG_INT_MBRF BIT(3) /* master data receive finished */
+#define REG_INT_START BIT(4) /* START condition generated */
+#define REG_INT_STOP BIT(5) /* STOP condition generated */
+#define REG_INT_NAKRCV BIT(6) /* NACK received */
+#define REG_INT_ALL 0x7f
+
+/* Constants */
+#define WAIT_TIMEOUT 200 /* ms */
+#define DEFAULT_SCL_RATE (100 * 1000) /* Hz */
+
+enum rk3x_i2c_state {
+ STATE_IDLE,
+ STATE_START,
+ STATE_READ,
+ STATE_WRITE,
+ STATE_STOP
+};
+
+/**
+ * @grf_offset: offset inside the grf regmap for setting the i2c type
+ */
+struct rk3x_i2c_soc_data {
+ int grf_offset;
+};
+
+struct rk3x_i2c {
+ struct i2c_adapter adap;
+ struct device *dev;
+ struct rk3x_i2c_soc_data *soc_data;
+
+ /* Hardware resources */
+ void __iomem *regs;
+ struct clk *clk;
+
+ /* Settings */
+ unsigned int scl_frequency;
+
+ /* Synchronization & notification */
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ bool busy;
+
+ /* Current message */
+ struct i2c_msg *msg;
+ u8 addr;
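+ /* REG_CON_MOD_* mode for the current message */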
+ unsigned int mode;
+ bool is_last_msg;
+
+ /* I2C state machine */
+ enum rk3x_i2c_state state;
+ unsigned int processed; /* sent/received bytes */
+ int error;
+};
+
+static inline void i2c_writel(struct rk3x_i2c *i2c, u32 value,
+ unsigned int offset)
+{
+ writel(value, i2c->regs + offset);
+}
+
+static inline u32 i2c_readl(struct rk3x_i2c *i2c, unsigned int offset)
+{
+ return readl(i2c->regs + offset);
+}
+
+/* Reset all interrupt pending bits */
+static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
+{
+ i2c_writel(i2c, REG_INT_ALL, REG_IPD);
+}
+
+/**
+ * Generate a START condition, which triggers a REG_INT_START interrupt.
+ */
+static void rk3x_i2c_start(struct rk3x_i2c *i2c)
+{
+ u32 val;
+
+ rk3x_i2c_clean_ipd(i2c);
+ i2c_writel(i2c, REG_INT_START, REG_IEN);
+
+ /* enable adapter with correct mode, send START condition */
+ val = REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
+
+ /* if we want to react to NACK, set ACTACK bit */
+ if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
+ val |= REG_CON_ACTACK;
+
+ i2c_writel(i2c, val, REG_CON);
+}
+
+/**
+ * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+ *
+ * @error: Error code to return in rk3x_i2c_xfer
+ */
+static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
+{
+ unsigned int ctrl;
+
+ i2c->processed = 0;
+ i2c->msg = NULL;
+ i2c->error = error;
+
+ if (i2c->is_last_msg) {
+ /* Enable stop interrupt */
+ i2c_writel(i2c, REG_INT_STOP, REG_IEN);
+
+ i2c->state = STATE_STOP;
+
+ ctrl = i2c_readl(i2c, REG_CON);
+ ctrl |= REG_CON_STOP;
+ i2c_writel(i2c, ctrl, REG_CON);
+ } else {
+ /* Signal rk3x_i2c_xfer to start the next message. */
+ i2c->busy = false;
+ i2c->state = STATE_IDLE;
+
+ /*
+ * The HW is actually not capable of REPEATED START. But we can
+ * get the intended effect by resetting its internal state
+ * and issuing an ordinary START.
+ */
+ i2c_writel(i2c, 0, REG_CON);
+
+ /* signal that we are finished with the current msg */
+ wake_up(&i2c->wait);
+ }
+}
+
+/**
+ * Setup a read according to i2c->msg
+ */
+static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
+{
+ unsigned int len = i2c->msg->len - i2c->processed;
+ u32 con;
+
+ con = i2c_readl(i2c, REG_CON);
+
+ /*
+ * The hw can read up to 32 bytes at a time. If we need more than one
+ * chunk, send an ACK after the last byte of the current chunk.
+ */
+ if (unlikely(len > 32)) {
+ len = 32;
+ con &= ~REG_CON_LASTACK;
+ } else {
+ con |= REG_CON_LASTACK;
+ }
+
+ /* make sure we are in plain RX mode if we read a second chunk */
+ if (i2c->processed != 0) {
+ con &= ~REG_CON_MOD_MASK;
+ con |= REG_CON_MOD(REG_CON_MOD_RX);
+ }
+
+ i2c_writel(i2c, con, REG_CON);
+ i2c_writel(i2c, len, REG_MRXCNT);
+}
+
+/**
+ * Fill the transmit buffer with data from i2c->msg
+ */
+static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
+{
+ unsigned int i, j;
+ u32 cnt = 0;
+ u32 val;
+ u8 byte;
+
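+ /* TX buffer holds 8 x 32-bit words (32 bytes); byte 0 of the first chunk is the slave address */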
+ for (i = 0; i < 8; ++i) {
+ val = 0;
+ for (j = 0; j < 4; ++j) {
+ if (i2c->processed == i2c->msg->len)
+ break;
+
+ if (i2c->processed == 0 && cnt == 0)
+ byte = (i2c->addr & 0x7f) << 1;
+ else
+ byte = i2c->msg->buf[i2c->processed++];
+
+ val |= byte << (j * 8);
+ cnt++;
+ }
+
+ i2c_writel(i2c, val, TXBUFFER_BASE + 4 * i);
+
+ if (i2c->processed == i2c->msg->len)
+ break;
+ }
+
+ i2c_writel(i2c, cnt, REG_MTXCNT);
+}
+
+
+/* IRQ handlers for individual states */
+
+static void rk3x_i2c_handle_start(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+ if (!(ipd & REG_INT_START)) {
+ rk3x_i2c_stop(i2c, -EIO);
+ dev_warn(i2c->dev, "unexpected irq in START: 0x%x\n", ipd);
+ rk3x_i2c_clean_ipd(i2c);
+ return;
+ }
+
+ /* ack interrupt */
+ i2c_writel(i2c, REG_INT_START, REG_IPD);
+
+ /* disable start bit */
+ i2c_writel(i2c, i2c_readl(i2c, REG_CON) & ~REG_CON_START, REG_CON);
+
+ /* enable appropriate interrupts and transition */
+ if (i2c->mode == REG_CON_MOD_TX) {
+ i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN);
+ i2c->state = STATE_WRITE;
+ rk3x_i2c_fill_transmit_buf(i2c);
+ } else {
+ /* in any other case, we are going to be reading. */
+ i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN);
+ i2c->state = STATE_READ;
+ rk3x_i2c_prepare_read(i2c);
+ }
+}
+
+static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+ if (!(ipd & REG_INT_MBTF)) {
+ rk3x_i2c_stop(i2c, -EIO);
+ dev_err(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd);
+ rk3x_i2c_clean_ipd(i2c);
+ return;
+ }
+
+ /* ack interrupt */
+ i2c_writel(i2c, REG_INT_MBTF, REG_IPD);
+
+ /* are we finished? */
+ if (i2c->processed == i2c->msg->len)
+ rk3x_i2c_stop(i2c, i2c->error);
+ else
+ rk3x_i2c_fill_transmit_buf(i2c);
+}
+
+static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+ unsigned int i;
+ unsigned int len = i2c->msg->len - i2c->processed;
+ u32 uninitialized_var(val);
+ u8 byte;
+
+ /* we only care for MBRF here. */
+ if (!(ipd & REG_INT_MBRF))
+ return;
+
+ /* ack interrupt */
+ i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+
+ /* read the data from receive buffer */
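+ /* (the RX buffer mirrors TX: eight little-endian 32-bit words) */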
+ for (i = 0; i < len; ++i) {
+ if (i % 4 == 0)
+ val = i2c_readl(i2c, RXBUFFER_BASE + (i / 4) * 4);
+
+ byte = (val >> ((i % 4) * 8)) & 0xff;
+ i2c->msg->buf[i2c->processed++] = byte;
+ }
+
+ /* are we finished? */
+ if (i2c->processed == i2c->msg->len)
+ rk3x_i2c_stop(i2c, i2c->error);
+ else
+ rk3x_i2c_prepare_read(i2c);
+}
+
+static void rk3x_i2c_handle_stop(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+ unsigned int con;
+
+ if (!(ipd & REG_INT_STOP)) {
+ rk3x_i2c_stop(i2c, -EIO);
+ dev_err(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd);
+ rk3x_i2c_clean_ipd(i2c);
+ return;
+ }
+
+ /* ack interrupt */
+ i2c_writel(i2c, REG_INT_STOP, REG_IPD);
+
+ /* disable STOP bit */
+ con = i2c_readl(i2c, REG_CON);
+ con &= ~REG_CON_STOP;
+ i2c_writel(i2c, con, REG_CON);
+
+ i2c->busy = false;
+ i2c->state = STATE_IDLE;
+
+ /* signal rk3x_i2c_xfer that we are finished */
+ wake_up(&i2c->wait);
+}
+
+static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
+{
+ struct rk3x_i2c *i2c = dev_id;
+ unsigned int ipd;
+
+ spin_lock(&i2c->lock);
+
+ ipd = i2c_readl(i2c, REG_IPD);
+ if (i2c->state == STATE_IDLE) {
+ dev_warn(i2c->dev, "irq in STATE_IDLE, ipd = 0x%x\n", ipd);
+ rk3x_i2c_clean_ipd(i2c);
+ goto out;
+ }
+
+ dev_dbg(i2c->dev, "IRQ: state %d, ipd: %x\n", i2c->state, ipd);
+
+ /* Clean interrupt bits we don't care about */
+ ipd &= ~(REG_INT_BRF | REG_INT_BTF);
+
+ if (ipd & REG_INT_NAKRCV) {
+ /*
+ * We got a NACK in the last operation. Depending on whether
+ * IGNORE_NAK is set, we have to stop the operation and report
+ * an error.
+ */
+ i2c_writel(i2c, REG_INT_NAKRCV, REG_IPD);
+
+ ipd &= ~REG_INT_NAKRCV;
+
+ if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
+ rk3x_i2c_stop(i2c, -ENXIO);
+ }
+
+ /* is there anything left to handle? */
+ if (unlikely(ipd == 0))
+ goto out;
+
+ switch (i2c->state) {
+ case STATE_START:
+ rk3x_i2c_handle_start(i2c, ipd);
+ break;
+ case STATE_WRITE:
+ rk3x_i2c_handle_write(i2c, ipd);
+ break;
+ case STATE_READ:
+ rk3x_i2c_handle_read(i2c, ipd);
+ break;
+ case STATE_STOP:
+ rk3x_i2c_handle_stop(i2c, ipd);
+ break;
+ case STATE_IDLE:
+ break;
+ }
+
+out:
+ spin_unlock(&i2c->lock);
+ return IRQ_HANDLED;
+}
+
+static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
+{
+ unsigned long i2c_rate = clk_get_rate(i2c->clk);
+ unsigned int div;
+
+ /* SCL rate = (clk rate) / (8 * DIV) */
+ div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
+
+ /*
+ * The lower and upper halves of the CLKDIV register hold the lengths
+ * of the SCL low and high periods, respectively.
+ */
+ div = DIV_ROUND_UP(div, 2);
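+
+ /*
+ * Worked example with hypothetical rates: a 100 MHz input clock and
+ * a 400 kHz SCL target give DIV_ROUND_UP(100000000, 400000 * 8) = 32,
+ * halved (rounding up) to 16, so REG_CLKDIV is written as 0x00100010.
+ */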
+
+ i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
+}
+
+/**
+ * rk3x_i2c_setup - Set up I2C registers for an I2C operation specified by msgs, num.
+ * @i2c: target controller data
+ * @msgs: I2C msgs to process
+ * @num: Number of msgs
+ *
+ * Must be called with i2c->lock held.
+ *
+ * Returns: Number of I2C msgs processed or negative in case of error
+ */
+static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
+{
+ u32 addr = (msgs[0].addr & 0x7f) << 1;
+ int ret = 0;
+
+ /*
+ * The I2C adapter can issue a small (len < 4) write packet before
+ * reading. This speeds up SMBus-style register reads.
+ * The MRXADDR/MRXRADDR registers hold the slave address and the slave
+ * register address in this case.
+ */
+
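+ /*
+ * Illustrative example: an SMBus register read arrives here as
+ * msgs[0] = 1-byte write of the register offset and msgs[1] = read
+ * with I2C_M_RD set; both complete in one hardware transaction below.
+ */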
+ if (num >= 2 && msgs[0].len < 4 &&
+ !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) {
+ u32 reg_addr = 0;
+ int i;
+
+ dev_dbg(i2c->dev, "Combined write/read from addr 0x%x\n",
+ addr >> 1);
+
+ /* Fill MRXRADDR with the register address(es) */
+ for (i = 0; i < msgs[0].len; ++i) {
+ reg_addr |= msgs[0].buf[i] << (i * 8);
+ reg_addr |= REG_MRXADDR_VALID(i);
+ }
+
+ /* msgs[0] is handled by hw. */
+ i2c->msg = &msgs[1];
+
+ i2c->mode = REG_CON_MOD_REGISTER_TX;
+
+ i2c_writel(i2c, addr | REG_MRXADDR_VALID(0), REG_MRXADDR);
+ i2c_writel(i2c, reg_addr, REG_MRXRADDR);
+
+ ret = 2;
+ } else {
+ /*
+ * We'll have to do it the boring way and process the msgs
+ * one-by-one.
+ */
+
+ if (msgs[0].flags & I2C_M_RD) {
+ addr |= 1; /* set read bit */
+
+ /*
+ * We have to transmit the slave addr first. Use
+ * MOD_REGISTER_TX for that purpose.
+ */
+ i2c->mode = REG_CON_MOD_REGISTER_TX;
+ i2c_writel(i2c, addr | REG_MRXADDR_VALID(0),
+ REG_MRXADDR);
+ i2c_writel(i2c, 0, REG_MRXRADDR);
+ } else {
+ i2c->mode = REG_CON_MOD_TX;
+ }
+
+ i2c->msg = &msgs[0];
+
+ ret = 1;
+ }
+
+ i2c->addr = msgs[0].addr;
+ i2c->busy = true;
+ i2c->state = STATE_START;
+ i2c->processed = 0;
+ i2c->error = 0;
+
+ rk3x_i2c_clean_ipd(i2c);
+
+ return ret;
+}
+
+static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
+ unsigned long timeout, flags;
+ int ret = 0;
+ int i;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ clk_enable(i2c->clk);
+
+ /* The clock rate might have changed, so setup the divider again */
+ rk3x_i2c_set_scl_rate(i2c, i2c->scl_frequency);
+
+ i2c->is_last_msg = false;
+
+ /*
+ * Process msgs. We can handle more than one message at once (see
+ * rk3x_i2c_setup()).
+ */
+ for (i = 0; i < num; i += ret) {
+ ret = rk3x_i2c_setup(i2c, msgs + i, num - i);
+
+ if (ret < 0) {
+ dev_err(i2c->dev, "rk3x_i2c_setup() failed\n");
+ break;
+ }
+
+ if (i + ret >= num)
+ i2c->is_last_msg = true;
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ rk3x_i2c_start(i2c);
+
+ timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+ msecs_to_jiffies(WAIT_TIMEOUT));
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ if (timeout == 0) {
+ dev_err(i2c->dev, "timeout, ipd: 0x%02x, state: %d\n",
+ i2c_readl(i2c, REG_IPD), i2c->state);
+
+ /* Force a STOP condition without interrupt */
+ i2c_writel(i2c, 0, REG_IEN);
+ i2c_writel(i2c, REG_CON_EN | REG_CON_STOP, REG_CON);
+
+ i2c->state = STATE_IDLE;
+
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ if (i2c->error) {
+ ret = i2c->error;
+ break;
+ }
+ }
+
+ clk_disable(i2c->clk);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ return ret;
+}
+
+static u32 rk3x_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+}
+
+static const struct i2c_algorithm rk3x_i2c_algorithm = {
+ .master_xfer = rk3x_i2c_xfer,
+ .functionality = rk3x_i2c_func,
+};
+
+static struct rk3x_i2c_soc_data soc_data[3] = {
+ { .grf_offset = 0x154 }, /* rk3066 */
+ { .grf_offset = 0x0a4 }, /* rk3188 */
+ { .grf_offset = -1 }, /* no I2C switching needed */
+};
+
+static const struct of_device_id rk3x_i2c_match[] = {
+ { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
+ { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+ { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
+ {},
+};
+
+static int rk3x_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct rk3x_i2c *i2c;
+ struct resource *mem;
+ int ret = 0;
+ int bus_nr;
+ u32 value;
+ int irq;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct rk3x_i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ match = of_match_node(rk3x_i2c_match, np);
+ i2c->soc_data = (struct rk3x_i2c_soc_data *)match->data;
+
+ if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &i2c->scl_frequency)) {
+ dev_info(&pdev->dev, "using default SCL frequency: %d\n",
+ DEFAULT_SCL_RATE);
+ i2c->scl_frequency = DEFAULT_SCL_RATE;
+ }
+
+ if (i2c->scl_frequency == 0 || i2c->scl_frequency > 400 * 1000) {
+ dev_warn(&pdev->dev, "invalid SCL frequency specified.\n");
+ dev_warn(&pdev->dev, "using default SCL frequency: %d\n",
+ DEFAULT_SCL_RATE);
+ i2c->scl_frequency = DEFAULT_SCL_RATE;
+ }
+
+ strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &rk3x_i2c_algorithm;
+ i2c->adap.retries = 3;
+ i2c->adap.dev.of_node = np;
+ i2c->adap.algo_data = i2c;
+ i2c->adap.dev.parent = &pdev->dev;
+
+ i2c->dev = &pdev->dev;
+
+ spin_lock_init(&i2c->lock);
+ init_waitqueue_head(&i2c->wait);
+
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(i2c->clk);
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(i2c->regs))
+ return PTR_ERR(i2c->regs);
+
+ /* Try to set the I2C adapter number from dt */
+ bus_nr = of_alias_get_id(np, "i2c");
+
+ /*
+ * Switch to new interface if the SoC also offers the old one.
+ * The control bit is located in the GRF register space.
+ */
+ if (i2c->soc_data->grf_offset >= 0) {
+ struct regmap *grf;
+
+ grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(grf)) {
+ dev_err(&pdev->dev,
+ "rk3x-i2c needs 'rockchip,grf' property\n");
+ return PTR_ERR(grf);
+ }
+
+ if (bus_nr < 0) {
+ dev_err(&pdev->dev, "rk3x-i2c needs i2cX alias");
+ return -EINVAL;
+ }
+
+ /* 27+i: write mask, 11+i: value */
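+ /*
+ * E.g. for bus_nr == 2 this is BIT(29) | BIT(13): the high bit
+ * write-enables the low one, which selects the new interface.
+ */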
+ value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
+
+ ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
+ if (ret != 0) {
+ dev_err(i2c->dev, "Could not write to GRF: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* IRQ setup */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "cannot find rk3x IRQ\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq,
+ 0, dev_name(&pdev->dev), i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot request IRQ\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, i2c);
+
+ ret = clk_prepare(i2c->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not prepare clock\n");
+ return ret;
+ }
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register adapter\n");
+ goto err_clk;
+ }
+
+ dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
+
+ return 0;
+
+err_clk:
+ clk_unprepare(i2c->clk);
+ return ret;
+}
+
+static int rk3x_i2c_remove(struct platform_device *pdev)
+{
+ struct rk3x_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
+ return 0;
+}
+
+static struct platform_driver rk3x_i2c_driver = {
+ .probe = rk3x_i2c_probe,
+ .remove = rk3x_i2c_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rk3x-i2c",
+ .of_match_table = rk3x_i2c_match,
+ },
+};
+
+module_platform_driver(rk3x_i2c_driver);
+
+MODULE_DESCRIPTION("Rockchip RK3xxx I2C Bus driver");
+MODULE_AUTHOR("Max Schwarz <max.schwarz@online.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
new file mode 100644
index 000000000000..4d75d4759709
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -0,0 +1,344 @@
+/*
+ * P2WI (Push-Pull Two Wire Interface) bus driver.
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The P2WI controller looks like an SMBus controller that only supports byte
+ * data transfers. But it differs from the standard SMBus protocol in several
+ * respects:
+ * - it supports only one slave device, and thus drops the address field
+ * - it adds a parity bit after every 8 bits of data
+ * - only one read access is required to read a byte (instead of a write
+ * followed by a read access in the standard SMBus protocol)
+ * - there's no Ack bit after each byte transfer
+ *
+ * This means this bus cannot be used to interface with standard SMBus
+ * devices (the only known device to support this interface is the AXP221
+ * PMIC).
+ *
+ */
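+/*
+ * Illustrative usage from a client driver (hypothetical): a single
+ * register read maps to i2c_smbus_read_byte_data(client, reg), which
+ * this adapter completes in one P2WI read access.
+ */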
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+
+/* P2WI registers */
+#define P2WI_CTRL 0x0
+#define P2WI_CCR 0x4
+#define P2WI_INTE 0x8
+#define P2WI_INTS 0xc
+#define P2WI_DADDR0 0x10
+#define P2WI_DADDR1 0x14
+#define P2WI_DLEN 0x18
+#define P2WI_DATA0 0x1c
+#define P2WI_DATA1 0x20
+#define P2WI_LCR 0x24
+#define P2WI_PMCR 0x28
+
+/* CTRL fields */
+#define P2WI_CTRL_START_TRANS BIT(7)
+#define P2WI_CTRL_ABORT_TRANS BIT(6)
+#define P2WI_CTRL_GLOBAL_INT_ENB BIT(1)
+#define P2WI_CTRL_SOFT_RST BIT(0)
+
+/* CLK CTRL fields */
+#define P2WI_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8)
+#define P2WI_CCR_MAX_CLK_DIV 0xff
+#define P2WI_CCR_CLK_DIV(v) ((v) & P2WI_CCR_MAX_CLK_DIV)
+
+/* STATUS fields */
+#define P2WI_INTS_TRANS_ERR_ID(v) (((v) >> 8) & 0xff)
+#define P2WI_INTS_LOAD_BSY BIT(2)
+#define P2WI_INTS_TRANS_ERR BIT(1)
+#define P2WI_INTS_TRANS_OVER BIT(0)
+
+/* DATA LENGTH fields */
+#define P2WI_DLEN_READ BIT(4)
+#define P2WI_DLEN_DATA_LENGTH(v) (((v) - 1) & 0x7)
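+/* note: DLEN encodes (length - 1), so a one-byte transfer programs 0 */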
+
+/* LINE CTRL fields */
+#define P2WI_LCR_SCL_STATE BIT(5)
+#define P2WI_LCR_SDA_STATE BIT(4)
+#define P2WI_LCR_SCL_CTL BIT(3)
+#define P2WI_LCR_SCL_CTL_EN BIT(2)
+#define P2WI_LCR_SDA_CTL BIT(1)
+#define P2WI_LCR_SDA_CTL_EN BIT(0)
+
+/* PMU MODE CTRL fields */
+#define P2WI_PMCR_PMU_INIT_SEND BIT(31)
+#define P2WI_PMCR_PMU_INIT_DATA(v) (((v) & 0xff) << 16)
+#define P2WI_PMCR_PMU_MODE_REG(v) (((v) & 0xff) << 8)
+#define P2WI_PMCR_PMU_DEV_ADDR(v) ((v) & 0xff)
+
+#define P2WI_MAX_FREQ 6000000
+
+struct p2wi {
+ struct i2c_adapter adapter;
+ struct completion complete;
+ unsigned int status;
+ void __iomem *regs;
+ struct clk *clk;
+ struct reset_control *rstc;
+ int slave_addr;
+};
+
+static irqreturn_t p2wi_interrupt(int irq, void *dev_id)
+{
+ struct p2wi *p2wi = dev_id;
+ unsigned long status;
+
+ status = readl(p2wi->regs + P2WI_INTS);
+ p2wi->status = status;
+
+ /* Clear interrupts */
+ status &= (P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR |
+ P2WI_INTS_TRANS_OVER);
+ writel(status, p2wi->regs + P2WI_INTS);
+
+ complete(&p2wi->complete);
+
+ return IRQ_HANDLED;
+}
+
+static u32 p2wi_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int p2wi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ struct p2wi *p2wi = i2c_get_adapdata(adap);
+ unsigned long dlen = P2WI_DLEN_DATA_LENGTH(1);
+
+ if (p2wi->slave_addr >= 0 && addr != p2wi->slave_addr) {
+ dev_err(&adap->dev, "invalid P2WI address\n");
+ return -EINVAL;
+ }
+
+ if (!data)
+ return -EINVAL;
+
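+ /*
+ * DADDR0 carries the target register offset; P2WI has no slave
+ * address field on the wire (single-device bus, see header comment).
+ */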
+ writel(command, p2wi->regs + P2WI_DADDR0);
+
+ if (read_write == I2C_SMBUS_READ)
+ dlen |= P2WI_DLEN_READ;
+ else
+ writel(data->byte, p2wi->regs + P2WI_DATA0);
+
+ writel(dlen, p2wi->regs + P2WI_DLEN);
+
+ if (readl(p2wi->regs + P2WI_CTRL) & P2WI_CTRL_START_TRANS) {
+ dev_err(&adap->dev, "P2WI bus busy\n");
+ return -EBUSY;
+ }
+
+ reinit_completion(&p2wi->complete);
+
+ writel(P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR | P2WI_INTS_TRANS_OVER,
+ p2wi->regs + P2WI_INTE);
+
+ writel(P2WI_CTRL_START_TRANS | P2WI_CTRL_GLOBAL_INT_ENB,
+ p2wi->regs + P2WI_CTRL);
+
+ wait_for_completion(&p2wi->complete);
+
+ if (p2wi->status & P2WI_INTS_LOAD_BSY) {
+ dev_err(&adap->dev, "P2WI bus busy\n");
+ return -EBUSY;
+ }
+
+ if (p2wi->status & P2WI_INTS_TRANS_ERR) {
+ dev_err(&adap->dev, "P2WI bus xfer error\n");
+ return -ENXIO;
+ }
+
+ if (read_write == I2C_SMBUS_READ)
+ data->byte = readl(p2wi->regs + P2WI_DATA0);
+
+ return 0;
+}
+
+static const struct i2c_algorithm p2wi_algo = {
+ .smbus_xfer = p2wi_smbus_xfer,
+ .functionality = p2wi_functionality,
+};
+
+static const struct of_device_id p2wi_of_match_table[] = {
+ { .compatible = "allwinner,sun6i-a31-p2wi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, p2wi_of_match_table);
+
+static int p2wi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *childnp;
+ unsigned long parent_clk_freq;
+ u32 clk_freq = 100000;
+ struct resource *r;
+ struct p2wi *p2wi;
+ u32 slave_addr;
+ int clk_div;
+ int irq;
+ int ret;
+
+ of_property_read_u32(np, "clock-frequency", &clk_freq);
+ if (clk_freq > P2WI_MAX_FREQ) {
+ dev_err(dev,
+ "required clock-frequency (%u Hz) is too high (max = 6MHz)",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (of_get_child_count(np) > 1) {
+ dev_err(dev, "P2WI only supports one slave device\n");
+ return -EINVAL;
+ }
+
+ p2wi = devm_kzalloc(dev, sizeof(struct p2wi), GFP_KERNEL);
+ if (!p2wi)
+ return -ENOMEM;
+
+ p2wi->slave_addr = -1;
+
+ /*
+ * Allow a p2wi node without any children so that an i2c-dev can be
+ * used from userspace.
+ * In this case the slave_addr is set to -1 and won't be checked when
+ * launching a P2WI transfer.
+ */
+ childnp = of_get_next_available_child(np, NULL);
+ if (childnp) {
+ ret = of_property_read_u32(childnp, "reg", &slave_addr);
+ if (ret) {
+ dev_err(dev, "invalid slave address on node %s\n",
+ childnp->full_name);
+ return -EINVAL;
+ }
+
+ p2wi->slave_addr = slave_addr;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p2wi->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(p2wi->regs))
+ return PTR_ERR(p2wi->regs);
+
+ strlcpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name));
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq: %d\n", irq);
+ return irq;
+ }
+
+ p2wi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(p2wi->clk)) {
+ ret = PTR_ERR(p2wi->clk);
+ dev_err(dev, "failed to retrieve clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(p2wi->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk: %d\n", ret);
+ return ret;
+ }
+
+ parent_clk_freq = clk_get_rate(p2wi->clk);
+
+ p2wi->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(p2wi->rstc)) {
+ ret = PTR_ERR(p2wi->rstc);
+ dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ ret = reset_control_deassert(p2wi->rstc);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset line: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ init_completion(&p2wi->complete);
+ p2wi->adapter.dev.parent = dev;
+ p2wi->adapter.algo = &p2wi_algo;
+ p2wi->adapter.owner = THIS_MODULE;
+ p2wi->adapter.dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, p2wi);
+ i2c_set_adapdata(&p2wi->adapter, p2wi);
+
+ ret = devm_request_irq(dev, irq, p2wi_interrupt, 0, pdev->name, p2wi);
+ if (ret) {
+ dev_err(dev, "can't register interrupt handler irq%d: %d\n",
+ irq, ret);
+ goto err_reset_assert;
+ }
+
+ writel(P2WI_CTRL_SOFT_RST, p2wi->regs + P2WI_CTRL);
+
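+ /*
+ * Example with hypothetical rates: a 24 MHz parent clock and the
+ * default 100 kHz bus frequency give clk_div = 240, within the
+ * 1..P2WI_CCR_MAX_CLK_DIV range enforced below.
+ */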
+ clk_div = parent_clk_freq / clk_freq;
+ if (!clk_div) {
+ dev_warn(dev,
+ "clock-frequency is too high, setting it to %lu Hz\n",
+ parent_clk_freq);
+ clk_div = 1;
+ } else if (clk_div > P2WI_CCR_MAX_CLK_DIV) {
+ dev_warn(dev,
+ "clock-frequency is too low, setting it to %lu Hz\n",
+ parent_clk_freq / P2WI_CCR_MAX_CLK_DIV);
+ clk_div = P2WI_CCR_MAX_CLK_DIV;
+ }
+
+ writel(P2WI_CCR_SDA_OUT_DELAY(1) | P2WI_CCR_CLK_DIV(clk_div),
+ p2wi->regs + P2WI_CCR);
+
+ ret = i2c_add_adapter(&p2wi->adapter);
+ if (!ret)
+ return 0;
+
+err_reset_assert:
+ reset_control_assert(p2wi->rstc);
+
+err_clk_disable:
+ clk_disable_unprepare(p2wi->clk);
+
+ return ret;
+}
+
+static int p2wi_remove(struct platform_device *dev)
+{
+ struct p2wi *p2wi = platform_get_drvdata(dev);
+
+ reset_control_assert(p2wi->rstc);
+ clk_disable_unprepare(p2wi->clk);
+ i2c_del_adapter(&p2wi->adapter);
+
+ return 0;
+}
+
+static struct platform_driver p2wi_driver = {
+ .probe = p2wi_probe,
+ .remove = p2wi_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "i2c-sunxi-p2wi",
+ .of_match_table = p2wi_of_match_table,
+ },
+};
+module_platform_driver(p2wi_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner P2WI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f7f9865b8b89..f6d313e528de 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -40,6 +40,7 @@ config I2C_MUX_PCA9541
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
+ depends on GPIOLIB
help
If you say yes here you get support for the Philips PCA954x
I2C mux/switch devices.
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 69abf9163df7..54e464e4bb72 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
struct accel_3d_state *accel_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&accel_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&accel_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 39b4cb48d738..6eba301ee03d 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -427,9 +427,12 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
int ret;
struct ad799x_state *st = iio_priv(indio_dev);
+ if (val < 0 || val > RES_MASK(chan->scan_type.realbits))
+ return -EINVAL;
+
mutex_lock(&indio_dev->mlock);
ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info),
- val);
+ val << chan->scan_type.shift);
mutex_unlock(&indio_dev->mlock);
return ret;
@@ -452,7 +455,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
if (ret < 0)
return ret;
- *val = valin;
+ *val = (valin >> chan->scan_type.shift) &
+ RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
}
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 3b5bacd4d8da..2b6a9ce9927c 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -510,12 +510,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
return idev->num_channels;
}
-static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
struct at91_adc_trigger *triggers,
const char *trigger_name)
{
struct at91_adc_state *st = iio_priv(idev);
- u8 value = 0;
int i;
for (i = 0; i < st->trigger_number; i++) {
@@ -528,15 +527,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
return -ENOMEM;
if (strcmp(trigger_name, name) == 0) {
- value = triggers[i].value;
kfree(name);
- break;
+ if (triggers[i].value == 0)
+ return -EINVAL;
+ return triggers[i].value;
}
kfree(name);
}
- return value;
+ return -EINVAL;
}
static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
@@ -546,14 +546,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
struct iio_buffer *buffer = idev->buffer;
struct at91_adc_reg_desc *reg = st->registers;
u32 status = at91_adc_readl(st, reg->trigger_register);
- u8 value;
+ int value;
u8 bit;
value = at91_adc_get_trigger_value_by_name(idev,
st->trigger_list,
idev->trig->name);
- if (value == 0)
- return -EINVAL;
+ if (value < 0)
+ return value;
if (state) {
st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 6989c16aec2b..b58d6302521f 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -121,8 +121,8 @@ static int men_z188_probe(struct mcb_device *dev,
indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
mem = mcb_request_mem(dev, "z188-adc");
- if (!mem)
- return -ENOMEM;
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
adc->base = ioremap(mem->start, resource_size(mem));
if (adc->base == NULL)
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index a4db3026bec6..d5dc4c6ce86c 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
return -EAGAIN;
}
}
- map_val = chan->channel + TOTAL_CHANNELS;
+ map_val = adc_dev->channel_step[chan->scan_index];
/*
* We check the complete FIFO. We programmed just one entry but in case
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 7de1c4c87942..eb86786e698e 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -645,6 +645,7 @@ int twl4030_get_madc_conversion(int channel_no)
req.channels = (1 << channel_no);
req.method = TWL4030_MADC_SW2;
req.active = 0;
+ req.raw = 0;
req.func_cb = NULL;
ret = twl4030_madc_conversion(&req);
if (ret < 0)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 73282cee0c81..a3109a6f4d86 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -75,6 +75,9 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
(s32)report_val);
}
+ sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ st->power_state.index,
+ &state_val);
return 0;
}
EXPORT_SYMBOL(hid_sensor_power_state);
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 40f4e4935d0d..fa034a3dad78 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&gyro_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&gyro_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index d833d55052ea..c7497009d60a 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
else if (name && index >= 0) {
pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
np->full_name, name ? name : "", index);
- return chan;
+ return NULL;
}
/*
@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
*/
np = np->parent;
if (np && !of_get_property(np, "io-channel-ranges", NULL))
- break;
+ return NULL;
}
+
return chan;
}
@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
if (channel != NULL)
return channel;
}
+
return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index f34c94380b41..96e71e103ea7 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
struct als_state *als_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&als_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&als_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index d203ef4d892f..412bae86d6ae 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
struct prox_state *prox_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&prox_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&prox_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index fe063a0a21cd..752569985d1d 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -52,6 +52,7 @@
struct tcs3472_data {
struct i2c_client *client;
+ struct mutex lock;
u8 enable;
u8 control;
u8 atime;
@@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ mutex_lock(&data->lock);
ret = tcs3472_req_data(data);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
return ret;
+ }
ret = i2c_smbus_read_word_data(data->client, chan->address);
+ mutex_unlock(&data->lock);
if (ret < 0)
return ret;
*val = ret;
@@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3472_info;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 09ea5c481f4c..ea08313af0d2 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -373,8 +373,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
{
struct ak8975_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- u16 meas_reg;
- s16 raw;
int ret;
mutex_lock(&data->lock);
@@ -422,16 +420,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
dev_err(&client->dev, "Read axis data fails\n");
goto exit;
}
- meas_reg = ret;
mutex_unlock(&data->lock);
- /* Endian conversion of the measured values. */
- raw = (s16) (le16_to_cpu(meas_reg));
-
/* Clamp to valid range. */
- raw = clamp_t(s16, raw, -4096, 4095);
- *val = raw;
+ *val = clamp_t(s16, ret, -4096, 4095);
return IIO_VAL_INT;
exit:
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 41cf29e2a371..b2b0937d5133 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&magn_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&magn_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 1cd190c73788..2c0d2a4fed8c 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
struct press_state *press_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
- int ret;
int ret_type;
s32 poll_value;
@@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hid_sensor_read_samp_freq_value(
+ ret_type = hid_sensor_read_samp_freq_value(
&press_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_read_raw_hyst_value(
+ ret_type = hid_sensor_read_raw_hyst_value(
&press_state->common_attributes, val, val2);
- ret_type = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret_type = -EINVAL;
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index ba6d0c520e63..01b2e0b18878 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -98,7 +98,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
return ret;
- *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
+ *val = be32_to_cpu(tmp) >> 12;
return IIO_VAL_INT;
case IIO_TEMP: /* in 0.0625 celsius / LSB */
mutex_lock(&data->lock);
@@ -112,7 +112,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
return ret;
- *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
+ *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -185,7 +185,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
.scan_type = {
- .sign = 's',
+ .sign = 'u',
.realbits = 20,
.storagebits = 32,
.shift = 12,
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index bf508b5550c4..dc21836b5a8d 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,18 +1,3 @@
obj-$(CONFIG_INFINIBAND) += core/
-obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
-obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
-obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
-obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
-obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
-obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
-obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
-obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
-obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
-obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
-obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
-obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/
-obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
-obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
-obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
-obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
-obj-$(CONFIG_INFINIBAND_ISERT) += ulp/isert/
+obj-$(CONFIG_INFINIBAND) += hw/
+obj-$(CONFIG_INFINIBAND) += ulp/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 3ab3865544bb..ffd0af6734af 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -18,7 +18,7 @@ ib_sa-y := sa_query.o multicast.o
ib_cm-y := cm.o
-iw_cm-y := iwcm.o
+iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
rdma_cm-y := cma.o
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 42c3058e6e9c..d570030d899c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3607,7 +3607,8 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
sizeof *id_stats, RDMA_NL_RDMA_CM,
- RDMA_NL_RDMA_CM_ID_STATS);
+ RDMA_NL_RDMA_CM_ID_STATS,
+ NLM_F_MULTI);
if (!id_stats)
goto out;
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
new file mode 100644
index 000000000000..b85ddbc979e0
--- /dev/null
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "iwpm_util.h"
+
+static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
+static int iwpm_ulib_version = 3;
+static int iwpm_user_pid = IWPM_PID_UNDEFINED;
+static atomic_t echo_nlmsg_seq;
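+/* last nlmsg sequence number seen from the port mapper, echoed back in requests */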
+
+int iwpm_valid_pid(void)
+{
+ return iwpm_user_pid > 0;
+}
+EXPORT_SYMBOL(iwpm_valid_pid);
+
+/*
+ * iwpm_register_pid - Send a netlink query to user space
+ * for the iwarp port mapper pid
+ *
+ * nlmsg attributes:
+ * [IWPM_NLA_REG_PID_SEQ]
+ * [IWPM_NLA_REG_IF_NAME]
+ * [IWPM_NLA_REG_IBDEV_NAME]
+ * [IWPM_NLA_REG_ULIB_NAME]
+ */
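+/*
+ * The call blocks in iwpm_wait_complete_req() until the port mapper's
+ * response is handled by iwpm_register_pid_cb() below.
+ */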
+int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
+{
+ struct sk_buff *skb = NULL;
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct nlmsghdr *nlh;
+ u32 msg_seq;
+ const char *err_str = "";
+ int ret = -EINVAL;
+
+ if (!iwpm_valid_client(nl_client)) {
+ err_str = "Invalid port mapper client";
+ goto pid_query_error;
+ }
+ if (iwpm_registered_client(nl_client))
+ return 0;
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
+ if (!skb) {
+ err_str = "Unable to create a nlmsg";
+ goto pid_query_error;
+ }
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL);
+ if (!nlmsg_request) {
+ err_str = "Unable to allocate netlink request";
+ goto pid_query_error;
+ }
+ msg_seq = atomic_read(&echo_nlmsg_seq);
+
+ /* fill in the pid request message */
+ err_str = "Unable to put attribute of the nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
+ if (ret)
+ goto pid_query_error;
+ ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
+ pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
+ if (ret)
+ goto pid_query_error;
+ ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
+ pm_msg->dev_name, IWPM_NLA_REG_IBDEV_NAME);
+ if (ret)
+ goto pid_query_error;
+ ret = ibnl_put_attr(skb, nlh, IWPM_ULIBNAME_SIZE,
+ (char *)iwpm_ulib_name, IWPM_NLA_REG_ULIB_NAME);
+ if (ret)
+ goto pid_query_error;
+
+ pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
+ __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
+
+ ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+ if (ret) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
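+ /*
+ * Nobody is listening: mark the client registered anyway, with an
+ * unavailable pid, so subsequent mapping calls return early instead
+ * of failing (see the iwpm_valid_pid() checks in the mapping ops).
+ */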
+ iwpm_set_registered(nl_client, 1);
+ iwpm_user_pid = IWPM_PID_UNAVAILABLE;
+ err_str = "Unable to send a nlmsg";
+ goto pid_query_error;
+ }
+ nlmsg_request->req_buffer = pm_msg;
+ ret = iwpm_wait_complete_req(nlmsg_request);
+ return ret;
+pid_query_error:
+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (nlmsg_request)
+ iwpm_free_nlmsg_request(&nlmsg_request->kref);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_register_pid);
+
+/*
+ * iwpm_add_mapping - Send a netlink add mapping message
+ * to the port mapper
+ * nlmsg attributes:
+ * [IWPM_NLA_MANAGE_MAPPING_SEQ]
+ * [IWPM_NLA_MANAGE_ADDR]
+ */
+int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
+{
+ struct sk_buff *skb = NULL;
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct nlmsghdr *nlh;
+ u32 msg_seq;
+ const char *err_str = "";
+ int ret = -EINVAL;
+
+ if (!iwpm_valid_client(nl_client)) {
+ err_str = "Invalid port mapper client";
+ goto add_mapping_error;
+ }
+ if (!iwpm_registered_client(nl_client)) {
+ err_str = "Unregistered port mapper client";
+ goto add_mapping_error;
+ }
+ if (!iwpm_valid_pid())
+ return 0;
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
+ if (!skb) {
+ err_str = "Unable to create a nlmsg";
+ goto add_mapping_error;
+ }
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL);
+ if (!nlmsg_request) {
+ err_str = "Unable to allocate netlink request";
+ goto add_mapping_error;
+ }
+ msg_seq = atomic_read(&echo_nlmsg_seq);
+ /* fill in the add mapping message */
+ err_str = "Unable to put attribute of the nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
+ IWPM_NLA_MANAGE_MAPPING_SEQ);
+ if (ret)
+ goto add_mapping_error;
+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
+ &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
+ if (ret)
+ goto add_mapping_error;
+ nlmsg_request->req_buffer = pm_msg;
+
+ ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ if (ret) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
+ iwpm_user_pid = IWPM_PID_UNDEFINED;
+ err_str = "Unable to send a nlmsg";
+ goto add_mapping_error;
+ }
+ ret = iwpm_wait_complete_req(nlmsg_request);
+ return ret;
+add_mapping_error:
+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (nlmsg_request)
+ iwpm_free_nlmsg_request(&nlmsg_request->kref);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_add_mapping);
+
+/*
+ * iwpm_add_and_query_mapping - Send a netlink add and query
+ * mapping message to the port mapper
+ * nlmsg attributes:
+ * [IWPM_NLA_QUERY_MAPPING_SEQ]
+ * [IWPM_NLA_QUERY_LOCAL_ADDR]
+ * [IWPM_NLA_QUERY_REMOTE_ADDR]
+ */
+int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
+{
+ struct sk_buff *skb = NULL;
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct nlmsghdr *nlh;
+ u32 msg_seq;
+ const char *err_str = "";
+ int ret = -EINVAL;
+
+ if (!iwpm_valid_client(nl_client)) {
+ err_str = "Invalid port mapper client";
+ goto query_mapping_error;
+ }
+ if (!iwpm_registered_client(nl_client)) {
+ err_str = "Unregistered port mapper client";
+ goto query_mapping_error;
+ }
+ if (!iwpm_valid_pid())
+ return 0;
+ ret = -ENOMEM;
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
+ if (!skb) {
+ err_str = "Unable to create a nlmsg";
+ goto query_mapping_error;
+ }
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq,
+ nl_client, GFP_KERNEL);
+ if (!nlmsg_request) {
+ err_str = "Unable to allocate netlink request";
+ goto query_mapping_error;
+ }
+ msg_seq = atomic_read(&echo_nlmsg_seq);
+
+ /* fill in the query message */
+ err_str = "Unable to put attribute of the nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
+ IWPM_NLA_QUERY_MAPPING_SEQ);
+ if (ret)
+ goto query_mapping_error;
+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
+ &pm_msg->loc_addr, IWPM_NLA_QUERY_LOCAL_ADDR);
+ if (ret)
+ goto query_mapping_error;
+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
+ &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
+ if (ret)
+ goto query_mapping_error;
+ nlmsg_request->req_buffer = pm_msg;
+
+ ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ if (ret) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
+ err_str = "Unable to send a nlmsg";
+ goto query_mapping_error;
+ }
+ ret = iwpm_wait_complete_req(nlmsg_request);
+ return ret;
+query_mapping_error:
+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (nlmsg_request)
+ iwpm_free_nlmsg_request(&nlmsg_request->kref);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_add_and_query_mapping);
+
+/*
+ * iwpm_remove_mapping - Send a netlink remove mapping message
+ * to the port mapper
+ * nlmsg attributes:
+ * [IWPM_NLA_MANAGE_MAPPING_SEQ]
+ * [IWPM_NLA_MANAGE_ADDR]
+ */
+int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ u32 msg_seq;
+ const char *err_str = "";
+ int ret = -EINVAL;
+
+ if (!iwpm_valid_client(nl_client)) {
+ err_str = "Invalid port mapper client";
+ goto remove_mapping_error;
+ }
+ if (!iwpm_registered_client(nl_client)) {
+ err_str = "Unregistered port mapper client";
+ goto remove_mapping_error;
+ }
+ if (!iwpm_valid_pid())
+ return 0;
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
+ if (!skb) {
+ ret = -ENOMEM;
+ err_str = "Unable to create a nlmsg";
+ goto remove_mapping_error;
+ }
+ msg_seq = atomic_read(&echo_nlmsg_seq);
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ err_str = "Unable to put attribute of the nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,
+ IWPM_NLA_MANAGE_MAPPING_SEQ);
+ if (ret)
+ goto remove_mapping_error;
+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),
+ local_addr, IWPM_NLA_MANAGE_ADDR);
+ if (ret)
+ goto remove_mapping_error;
+
+ ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ if (ret) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
+ iwpm_user_pid = IWPM_PID_UNDEFINED;
+ err_str = "Unable to send a nlmsg";
+ goto remove_mapping_error;
+ }
+ iwpm_print_sockaddr(local_addr,
+ "remove_mapping: Local sockaddr:");
+ return 0;
+remove_mapping_error:
+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_remove_mapping);
+
+/* netlink attribute policy for the received response to register pid request */
+static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
+ [IWPM_NLA_RREG_PID_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_RREG_IBDEV_NAME] = { .type = NLA_STRING,
+ .len = IWPM_DEVNAME_SIZE - 1 },
+ [IWPM_NLA_RREG_ULIB_NAME] = { .type = NLA_STRING,
+ .len = IWPM_ULIBNAME_SIZE - 1 },
+ [IWPM_NLA_RREG_ULIB_VER] = { .type = NLA_U16 },
+ [IWPM_NLA_RREG_PID_ERR] = { .type = NLA_U16 }
+};
+
+/*
+ * iwpm_register_pid_cb - Process a port mapper response to
+ * iwpm_register_pid()
+ */
+int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct nlattr *nltb[IWPM_NLA_RREG_PID_MAX];
+ struct iwpm_dev_data *pm_msg;
+ char *dev_name, *iwpm_name;
+ u32 msg_seq;
+ u8 nl_client;
+ u16 iwpm_version;
+ const char *msg_type = "Register Pid response";
+
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RREG_PID_MAX,
+ resp_reg_policy, nltb, msg_type))
+ return -EINVAL;
+
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_RREG_PID_SEQ]);
+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
+ if (!nlmsg_request) {
+ pr_info("%s: Could not find a matching request (seq = %u)\n",
+ __func__, msg_seq);
+ return -EINVAL;
+ }
+ pm_msg = nlmsg_request->req_buffer;
+ nl_client = nlmsg_request->nl_client;
+ dev_name = (char *)nla_data(nltb[IWPM_NLA_RREG_IBDEV_NAME]);
+ iwpm_name = (char *)nla_data(nltb[IWPM_NLA_RREG_ULIB_NAME]);
+ iwpm_version = nla_get_u16(nltb[IWPM_NLA_RREG_ULIB_VER]);
+
+ /* check device name, ulib name and version */
+ if (strcmp(pm_msg->dev_name, dev_name) ||
+ strcmp(iwpm_ulib_name, iwpm_name) ||
+ iwpm_version != iwpm_ulib_version) {
+
+ pr_info("%s: Incorrect info (dev = %s name = %s version = %d)\n",
+ __func__, dev_name, iwpm_name, iwpm_version);
+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
+ goto register_pid_response_exit;
+ }
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
+ __func__, iwpm_user_pid);
+ if (iwpm_valid_client(nl_client))
+ iwpm_set_registered(nl_client, 1);
+register_pid_response_exit:
+ nlmsg_request->request_done = 1;
+ /* always drop the reference taken by iwpm_find_nlmsg_request() */
+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
+ barrier();
+ wake_up(&nlmsg_request->waitq);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_register_pid_cb);
+
+/* netlink attribute policy for the received response to add mapping request */
+static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
+ [IWPM_NLA_MANAGE_MAPPING_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_MANAGE_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_MANAGE_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 }
+};
+
+/*
+ * iwpm_add_mapping_cb - Process a port mapper response to
+ * iwpm_add_mapping()
+ */
+int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct iwpm_sa_data *pm_msg;
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct nlattr *nltb[IWPM_NLA_RMANAGE_MAPPING_MAX];
+ struct sockaddr_storage *local_sockaddr;
+ struct sockaddr_storage *mapped_sockaddr;
+ const char *msg_type;
+ u32 msg_seq;
+
+ msg_type = "Add Mapping response";
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RMANAGE_MAPPING_MAX,
+ resp_add_policy, nltb, msg_type))
+ return -EINVAL;
+
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_MANAGE_MAPPING_SEQ]);
+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
+ if (!nlmsg_request) {
+ pr_info("%s: Could not find a matching request (seq = %u)\n",
+ __func__, msg_seq);
+ return -EINVAL;
+ }
+ pm_msg = nlmsg_request->req_buffer;
+ local_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_MANAGE_ADDR]);
+ mapped_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_MANAGE_MAPPED_LOC_ADDR]);
+
+ if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr)) {
+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
+ goto add_mapping_response_exit;
+ }
+ if (mapped_sockaddr->ss_family != local_sockaddr->ss_family) {
+ pr_info("%s: Sockaddr family doesn't match the requested one\n",
+ __func__);
+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
+ goto add_mapping_response_exit;
+ }
+ memcpy(&pm_msg->mapped_loc_addr, mapped_sockaddr,
+ sizeof(*mapped_sockaddr));
+ iwpm_print_sockaddr(&pm_msg->loc_addr,
+ "add_mapping: Local sockaddr:");
+ iwpm_print_sockaddr(&pm_msg->mapped_loc_addr,
+ "add_mapping: Mapped local sockaddr:");
+
+add_mapping_response_exit:
+ nlmsg_request->request_done = 1;
+ /* always drop the reference taken by iwpm_find_nlmsg_request() */
+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
+ barrier();
+ wake_up(&nlmsg_request->waitq);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_add_mapping_cb);
+
+/* netlink attribute policy for the response to add and query mapping request */
+static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
+ [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_QUERY_REMOTE_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPING_ERR] = { .type = NLA_U16 }
+};
+
+/*
+ * iwpm_add_and_query_mapping_cb - Process a port mapper response to
+ * iwpm_add_and_query_mapping()
+ */
+int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct iwpm_sa_data *pm_msg;
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+ struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+ struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+ const char *msg_type;
+ u32 msg_seq;
+ u16 err_code;
+
+ msg_type = "Query Mapping response";
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+ resp_query_policy, nltb, msg_type))
+ return -EINVAL;
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_QUERY_MAPPING_SEQ]);
+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
+ if (!nlmsg_request) {
+ pr_info("%s: Could not find a matching request (seq = %u)\n",
+ __func__, msg_seq);
+ return -EINVAL;
+ }
+ pm_msg = nlmsg_request->req_buffer;
+ local_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ remote_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ mapped_loc_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+ mapped_rem_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+ err_code = nla_get_u16(nltb[IWPM_NLA_RQUERY_MAPPING_ERR]);
+ if (err_code == IWPM_REMOTE_QUERY_REJECT) {
+ pr_info("%s: Received a Reject (pid = %u, echo seq = %u)\n",
+ __func__, cb->nlh->nlmsg_pid, msg_seq);
+ nlmsg_request->err_code = IWPM_REMOTE_QUERY_REJECT;
+ }
+ if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr) ||
+ iwpm_compare_sockaddr(remote_sockaddr, &pm_msg->rem_addr)) {
+ pr_info("%s: Incorrect local sockaddr\n", __func__);
+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
+ goto query_mapping_response_exit;
+ }
+ if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+ mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+ pr_info("%s: Sockaddr family doesn't match the requested one\n",
+ __func__);
+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
+ goto query_mapping_response_exit;
+ }
+ memcpy(&pm_msg->mapped_loc_addr, mapped_loc_sockaddr,
+ sizeof(*mapped_loc_sockaddr));
+ memcpy(&pm_msg->mapped_rem_addr, mapped_rem_sockaddr,
+ sizeof(*mapped_rem_sockaddr));
+
+ iwpm_print_sockaddr(&pm_msg->loc_addr,
+ "query_mapping: Local sockaddr:");
+ iwpm_print_sockaddr(&pm_msg->mapped_loc_addr,
+ "query_mapping: Mapped local sockaddr:");
+ iwpm_print_sockaddr(&pm_msg->rem_addr,
+ "query_mapping: Remote sockaddr:");
+ iwpm_print_sockaddr(&pm_msg->mapped_rem_addr,
+ "query_mapping: Mapped remote sockaddr:");
+query_mapping_response_exit:
+ nlmsg_request->request_done = 1;
+ /* always drop the reference taken by iwpm_find_nlmsg_request() */
+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
+ barrier();
+ wake_up(&nlmsg_request->waitq);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
+
+/* netlink attribute policy for the received request for mapping info */
+static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
+ [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
+ .len = IWPM_ULIBNAME_SIZE - 1 },
+ [IWPM_NLA_MAPINFO_ULIB_VER] = { .type = NLA_U16 }
+};
+
+/*
+ * iwpm_mapping_info_cb - Process a port mapper request for mapping info
+ */
+int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
+ const char *msg_type = "Mapping Info response";
+ int iwpm_pid;
+ u8 nl_client;
+ char *iwpm_name;
+ u16 iwpm_version;
+ int ret = -EINVAL;
+
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_REQ_MAX,
+ resp_mapinfo_policy, nltb, msg_type)) {
+ pr_info("%s: Unable to parse nlmsg\n", __func__);
+ return ret;
+ }
+ iwpm_name = (char *)nla_data(nltb[IWPM_NLA_MAPINFO_ULIB_NAME]);
+ iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]);
+ if (strcmp(iwpm_ulib_name, iwpm_name) ||
+ iwpm_version != iwpm_ulib_version) {
+ pr_info("%s: Invalid port mapper name = %s version = %d\n",
+ __func__, iwpm_name, iwpm_version);
+ return ret;
+ }
+ nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+ if (!iwpm_valid_client(nl_client)) {
+ pr_info("%s: Invalid port mapper client = %d\n",
+ __func__, nl_client);
+ return ret;
+ }
+ iwpm_set_registered(nl_client, 0);
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ if (!iwpm_mapinfo_available())
+ return 0;
+ iwpm_pid = cb->nlh->nlmsg_pid;
+ pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
+ __func__, iwpm_pid);
+ ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_mapping_info_cb);
+
+/* netlink attribute policy for the received mapping info ack */
+static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
+ [IWPM_NLA_MAPINFO_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_MAPINFO_SEND_NUM] = { .type = NLA_U32 },
+ [IWPM_NLA_MAPINFO_ACK_NUM] = { .type = NLA_U32 }
+};
+
+/*
+ * iwpm_ack_mapping_info_cb - Process a port mapper ack for
+ * the provided mapping info records
+ */
+int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlattr *nltb[IWPM_NLA_MAPINFO_NUM_MAX];
+ u32 mapinfo_send, mapinfo_ack;
+ const char *msg_type = "Mapping Info Ack";
+
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_NUM_MAX,
+ ack_mapinfo_policy, nltb, msg_type))
+ return -EINVAL;
+ mapinfo_send = nla_get_u32(nltb[IWPM_NLA_MAPINFO_SEND_NUM]);
+ mapinfo_ack = nla_get_u32(nltb[IWPM_NLA_MAPINFO_ACK_NUM]);
+ if (mapinfo_ack != mapinfo_send)
+ pr_info("%s: Invalid mapinfo number (sent = %u ack-ed = %u)\n",
+ __func__, mapinfo_send, mapinfo_ack);
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_ack_mapping_info_cb);
+
+/* netlink attribute policy for the received port mapper error message */
+static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
+ [IWPM_NLA_ERR_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_ERR_CODE] = { .type = NLA_U16 },
+};
+
+/*
+ * iwpm_mapping_error_cb - Process a port mapper error message
+ */
+int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ int nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+ struct nlattr *nltb[IWPM_NLA_ERR_MAX];
+ u32 msg_seq;
+ u16 err_code;
+ const char *msg_type = "Mapping Error Msg";
+
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_ERR_MAX,
+ map_error_policy, nltb, msg_type))
+ return -EINVAL;
+
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_ERR_SEQ]);
+ err_code = nla_get_u16(nltb[IWPM_NLA_ERR_CODE]);
+ pr_info("%s: Received msg seq = %u err code = %u client = %d\n",
+ __func__, msg_seq, err_code, nl_client);
+ /* look for nlmsg_request */
+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
+ if (!nlmsg_request) {
+ /* not all errors have associated requests */
+ pr_debug("Could not find matching req (seq = %u)\n", msg_seq);
+ return 0;
+ }
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ nlmsg_request->err_code = err_code;
+ nlmsg_request->request_done = 1;
+	/* drop the extra reference taken when the request was found */
+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
+ barrier();
+ wake_up(&nlmsg_request->waitq);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_mapping_error_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
new file mode 100644
index 000000000000..69e9f84c1605
--- /dev/null
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "iwpm_util.h"
+
+#define IWPM_HASH_BUCKET_SIZE 512
+#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1)
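+/* the bucket count must stay a power of two for the mask to work */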
+
+static LIST_HEAD(iwpm_nlmsg_req_list);
+static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
+
+static struct hlist_head *iwpm_hash_bucket;
+static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
+
+static DEFINE_MUTEX(iwpm_admin_lock);
+static struct iwpm_admin_data iwpm_admin;
+
+int iwpm_init(u8 nl_client)
+{
+ if (iwpm_valid_client(nl_client))
+ return -EINVAL;
+ mutex_lock(&iwpm_admin_lock);
+ if (atomic_read(&iwpm_admin.refcount) == 0) {
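+		/* the first client to register allocates the shared hash table */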
+ iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_hash_bucket) {
+ mutex_unlock(&iwpm_admin_lock);
+ pr_err("%s Unable to create mapinfo hash table\n", __func__);
+ return -ENOMEM;
+ }
+ }
+ atomic_inc(&iwpm_admin.refcount);
+ mutex_unlock(&iwpm_admin_lock);
+ iwpm_set_valid(nl_client, 1);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_init);
+
+static void free_hash_bucket(void);
+
+int iwpm_exit(u8 nl_client)
+{
+ if (!iwpm_valid_client(nl_client))
+ return -EINVAL;
+ mutex_lock(&iwpm_admin_lock);
+ if (atomic_read(&iwpm_admin.refcount) == 0) {
+ mutex_unlock(&iwpm_admin_lock);
+ pr_err("%s Incorrect usage - negative refcount\n", __func__);
+ return -EINVAL;
+ }
+ if (atomic_dec_and_test(&iwpm_admin.refcount)) {
+ free_hash_bucket();
+ pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+ }
+ mutex_unlock(&iwpm_admin_lock);
+ iwpm_set_valid(nl_client, 0);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_exit);
+
+static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+ struct sockaddr_storage *);
+
+int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
+ struct sockaddr_storage *mapped_sockaddr,
+ u8 nl_client)
+{
+ struct hlist_head *hash_bucket_head;
+ struct iwpm_mapping_info *map_info;
+ unsigned long flags;
+
+ if (!iwpm_valid_client(nl_client))
+ return -EINVAL;
+ map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
+ if (!map_info) {
+ pr_err("%s: Unable to allocate a mapping info\n", __func__);
+ return -ENOMEM;
+ }
+ memcpy(&map_info->local_sockaddr, local_sockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
+ sizeof(struct sockaddr_storage));
+ map_info->nl_client = nl_client;
+
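+	/* add the mapping only if the hash table is still allocated */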
+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ if (iwpm_hash_bucket) {
+ hash_bucket_head = get_hash_bucket_head(
+ &map_info->local_sockaddr,
+ &map_info->mapped_sockaddr);
+ hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+ }
+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(iwpm_create_mapinfo);
+
+int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
+ struct sockaddr_storage *mapped_local_addr)
+{
+ struct hlist_node *tmp_hlist_node;
+ struct hlist_head *hash_bucket_head;
+ struct iwpm_mapping_info *map_info = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ if (iwpm_hash_bucket) {
+ hash_bucket_head = get_hash_bucket_head(
+ local_sockaddr,
+ mapped_local_addr);
+ hlist_for_each_entry_safe(map_info, tmp_hlist_node,
+ hash_bucket_head, hlist_node) {
+
+ if (!iwpm_compare_sockaddr(&map_info->mapped_sockaddr,
+ mapped_local_addr)) {
+
+ hlist_del_init(&map_info->hlist_node);
+ kfree(map_info);
+ ret = 0;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_remove_mapinfo);
+
+static void free_hash_bucket(void)
+{
+ struct hlist_node *tmp_hlist_node;
+ struct iwpm_mapping_info *map_info;
+ unsigned long flags;
+ int i;
+
+ /* remove all the mapinfo data from the list */
+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ hlist_for_each_entry_safe(map_info, tmp_hlist_node,
+ &iwpm_hash_bucket[i], hlist_node) {
+
+ hlist_del_init(&map_info->hlist_node);
+ kfree(map_info);
+ }
+ }
+ /* free the hash list */
+ kfree(iwpm_hash_bucket);
+ iwpm_hash_bucket = NULL;
+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+}
+
+struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
+ u8 nl_client, gfp_t gfp)
+{
+ struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ unsigned long flags;
+
+ nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
+ if (!nlmsg_request) {
+ pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+ return NULL;
+ }
+ spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
+ list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
+ spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
+
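+	/*
+	 * Hold two references: one is dropped by the netlink callback
+	 * that completes the request, the other by the waiting caller.
+	 */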
+ kref_init(&nlmsg_request->kref);
+ kref_get(&nlmsg_request->kref);
+ nlmsg_request->nlmsg_seq = nlmsg_seq;
+ nlmsg_request->nl_client = nl_client;
+ nlmsg_request->request_done = 0;
+ nlmsg_request->err_code = 0;
+ return nlmsg_request;
+}
+
+void iwpm_free_nlmsg_request(struct kref *kref)
+{
+ struct iwpm_nlmsg_request *nlmsg_request;
+ unsigned long flags;
+
+ nlmsg_request = container_of(kref, struct iwpm_nlmsg_request, kref);
+
+ spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
+ list_del_init(&nlmsg_request->inprocess_list);
+ spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
+
+ if (!nlmsg_request->request_done)
+ pr_debug("%s Freeing incomplete nlmsg request (seq = %u).\n",
+ __func__, nlmsg_request->nlmsg_seq);
+ kfree(nlmsg_request);
+}
+
+struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
+{
+ struct iwpm_nlmsg_request *nlmsg_request;
+ struct iwpm_nlmsg_request *found_request = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
+ list_for_each_entry(nlmsg_request, &iwpm_nlmsg_req_list,
+ inprocess_list) {
+ if (nlmsg_request->nlmsg_seq == echo_seq) {
+ found_request = nlmsg_request;
+ kref_get(&nlmsg_request->kref);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
+ return found_request;
+}
+
+int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
+{
+ int ret;
+ init_waitqueue_head(&nlmsg_request->waitq);
+
+ ret = wait_event_timeout(nlmsg_request->waitq,
+ (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
+ if (!ret) {
+ ret = -EINVAL;
+ pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
+ __func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
+ } else {
+ ret = nlmsg_request->err_code;
+ }
+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
+ return ret;
+}
+
+int iwpm_get_nlmsg_seq(void)
+{
+ return atomic_inc_return(&iwpm_admin.nlmsg_seq);
+}
+
+int iwpm_valid_client(u8 nl_client)
+{
+ if (nl_client >= RDMA_NL_NUM_CLIENTS)
+ return 0;
+ return iwpm_admin.client_list[nl_client];
+}
+
+void iwpm_set_valid(u8 nl_client, int valid)
+{
+ if (nl_client >= RDMA_NL_NUM_CLIENTS)
+ return;
+ iwpm_admin.client_list[nl_client] = valid;
+}
+
+/* valid client */
+int iwpm_registered_client(u8 nl_client)
+{
+ return iwpm_admin.reg_list[nl_client];
+}
+
+/* valid client */
+void iwpm_set_registered(u8 nl_client, int reg)
+{
+ iwpm_admin.reg_list[nl_client] = reg;
+}
+
+int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
+ struct sockaddr_storage *b_sockaddr)
+{
+ if (a_sockaddr->ss_family != b_sockaddr->ss_family)
+ return 1;
+ if (a_sockaddr->ss_family == AF_INET) {
+ struct sockaddr_in *a4_sockaddr =
+ (struct sockaddr_in *)a_sockaddr;
+ struct sockaddr_in *b4_sockaddr =
+ (struct sockaddr_in *)b_sockaddr;
+ if (!memcmp(&a4_sockaddr->sin_addr,
+ &b4_sockaddr->sin_addr, sizeof(struct in_addr))
+ && a4_sockaddr->sin_port == b4_sockaddr->sin_port)
+ return 0;
+
+ } else if (a_sockaddr->ss_family == AF_INET6) {
+ struct sockaddr_in6 *a6_sockaddr =
+ (struct sockaddr_in6 *)a_sockaddr;
+ struct sockaddr_in6 *b6_sockaddr =
+ (struct sockaddr_in6 *)b_sockaddr;
+ if (!memcmp(&a6_sockaddr->sin6_addr,
+ &b6_sockaddr->sin6_addr, sizeof(struct in6_addr))
+ && a6_sockaddr->sin6_port == b6_sockaddr->sin6_port)
+ return 0;
+
+ } else {
+ pr_err("%s: Invalid sockaddr family\n", __func__);
+ }
+ return 1;
+}
+
+struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
+ int nl_client)
+{
+ struct sk_buff *skb = NULL;
+
+ skb = dev_alloc_skb(NLMSG_GOODSIZE);
+ if (!skb) {
+ pr_err("%s Unable to allocate skb\n", __func__);
+ goto create_nlmsg_exit;
+ }
+ if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,
+ NLM_F_REQUEST))) {
+ pr_warn("%s: Unable to put the nlmsg header\n", __func__);
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+create_nlmsg_exit:
+ return skb;
+}
+
+int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
+ const struct nla_policy *nlmsg_policy,
+ struct nlattr *nltb[], const char *msg_type)
+{
+ int nlh_len = 0;
+ int ret;
+ const char *err_str = "";
+
+ ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy);
+ if (ret) {
+ err_str = "Invalid attribute";
+ goto parse_nlmsg_error;
+ }
+ ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy);
+ if (ret) {
+ err_str = "Unable to parse the nlmsg";
+ goto parse_nlmsg_error;
+ }
+ ret = iwpm_validate_nlmsg_attr(nltb, policy_max);
+ if (ret) {
+ err_str = "Invalid NULL attribute";
+ goto parse_nlmsg_error;
+ }
+ return 0;
+parse_nlmsg_error:
+ pr_warn("%s: %s (msg type %s ret = %d)\n",
+ __func__, err_str, msg_type, ret);
+ return ret;
+}
+
+void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg)
+{
+ struct sockaddr_in6 *sockaddr_v6;
+ struct sockaddr_in *sockaddr_v4;
+
+ switch (sockaddr->ss_family) {
+ case AF_INET:
+ sockaddr_v4 = (struct sockaddr_in *)sockaddr;
+ pr_debug("%s IPV4 %pI4: %u(0x%04X)\n",
+ msg, &sockaddr_v4->sin_addr,
+ ntohs(sockaddr_v4->sin_port),
+ ntohs(sockaddr_v4->sin_port));
+ break;
+ case AF_INET6:
+ sockaddr_v6 = (struct sockaddr_in6 *)sockaddr;
+ pr_debug("%s IPV6 %pI6: %u(0x%04X)\n",
+ msg, &sockaddr_v6->sin6_addr,
+ ntohs(sockaddr_v6->sin6_port),
+ ntohs(sockaddr_v6->sin6_port));
+ break;
+ default:
+ break;
+ }
+}
+
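+/* hash the IPv6 address and TCP port into a single 32-bit value */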
+static u32 iwpm_ipv6_jhash(struct sockaddr_in6 *ipv6_sockaddr)
+{
+ u32 ipv6_hash = jhash(&ipv6_sockaddr->sin6_addr, sizeof(struct in6_addr), 0);
+ u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0);
+ return hash;
+}
+
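+/* hash the IPv4 address and TCP port into a single 32-bit value */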
+static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
+{
+ u32 ipv4_hash = jhash(&ipv4_sockaddr->sin_addr, sizeof(struct in_addr), 0);
+ u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0);
+ return hash;
+}
+
+static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
+ *local_sockaddr,
+ struct sockaddr_storage
+ *mapped_sockaddr)
+{
+ u32 local_hash, mapped_hash, hash;
+
+ if (local_sockaddr->ss_family == AF_INET) {
+ local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
+ mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+
+ } else if (local_sockaddr->ss_family == AF_INET6) {
+ local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
+ mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+ } else {
+ pr_err("%s: Invalid sockaddr family\n", __func__);
+ return NULL;
+ }
+
+	if (local_hash == mapped_hash) /* no port mapper: addresses are identical */
+ hash = local_hash;
+ else
+ hash = jhash_2words(local_hash, mapped_hash, 0);
+
+ return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+}
+
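+/*
+ * send_mapinfo_num - Notify the user space port mapper of the number
+ * of mapping info records sent for a client
+ */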
+static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ u32 msg_seq;
+ const char *err_str = "";
+ int ret = -EINVAL;
+
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client);
+ if (!skb) {
+ err_str = "Unable to create a nlmsg";
+ goto mapinfo_num_error;
+ }
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ msg_seq = 0;
+ err_str = "Unable to put attribute of mapinfo number nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ);
+ if (ret)
+ goto mapinfo_num_error;
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32),
+ &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
+ if (ret)
+ goto mapinfo_num_error;
+ ret = ibnl_unicast(skb, nlh, iwpm_pid);
+ if (ret) {
+ skb = NULL;
+ err_str = "Unable to send a nlmsg";
+ goto mapinfo_num_error;
+ }
+	pr_debug("%s: Sent mapping number = %u\n", __func__, mapping_num);
+ return 0;
+mapinfo_num_error:
+ pr_info("%s: %s\n", __func__, err_str);
+ if (skb)
+ dev_kfree_skb(skb);
+ return ret;
+}
+
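+/*
+ * send_nlmsg_done - Append an NLMSG_DONE record to the skb and
+ * send it to the user space port mapper
+ */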
+static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
+{
+ struct nlmsghdr *nlh = NULL;
+ int ret = 0;
+
+ if (!skb)
+ return ret;
+ if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
+ RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
+ pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
+ return -ENOMEM;
+ }
+ nlh->nlmsg_type = NLMSG_DONE;
+ ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);
+ if (ret)
+ pr_warn("%s Unable to send a nlmsg\n", __func__);
+ return ret;
+}
+
+int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
+{
+ struct iwpm_mapping_info *map_info;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ int skb_num = 0, mapping_num = 0;
+ int i = 0, nlmsg_bytes = 0;
+ unsigned long flags;
+ const char *err_str = "";
+ int ret;
+
+ skb = dev_alloc_skb(NLMSG_GOODSIZE);
+ if (!skb) {
+ ret = -ENOMEM;
+ err_str = "Unable to allocate skb";
+ goto send_mapping_info_exit;
+ }
+ skb_num++;
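+	/* walk all hash buckets, batching this client's mappings into skbs */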
+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
+ hlist_node) {
+ if (map_info->nl_client != nl_client)
+ continue;
+ nlh = NULL;
+ if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
+ RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
+ ret = -ENOMEM;
+ err_str = "Unable to put the nlmsg header";
+ goto send_mapping_info_unlock;
+ }
+ err_str = "Unable to put attribute of the nlmsg";
+ ret = ibnl_put_attr(skb, nlh,
+ sizeof(struct sockaddr_storage),
+ &map_info->local_sockaddr,
+ IWPM_NLA_MAPINFO_LOCAL_ADDR);
+ if (ret)
+ goto send_mapping_info_unlock;
+
+ ret = ibnl_put_attr(skb, nlh,
+ sizeof(struct sockaddr_storage),
+ &map_info->mapped_sockaddr,
+ IWPM_NLA_MAPINFO_MAPPED_ADDR);
+ if (ret)
+ goto send_mapping_info_unlock;
+
+ iwpm_print_sockaddr(&map_info->local_sockaddr,
+ "send_mapping_info: Local sockaddr:");
+ iwpm_print_sockaddr(&map_info->mapped_sockaddr,
+ "send_mapping_info: Mapped local sockaddr:");
+ mapping_num++;
+ nlmsg_bytes += nlh->nlmsg_len;
+
+			/*
+			 * flush this skb if the next record, plus room
+			 * for the NLMSG_DONE header, might not fit
+			 */
+			if (NLMSG_GOODSIZE - nlmsg_bytes < nlh->nlmsg_len * 2) {
+ nlmsg_bytes = 0;
+ skb_num++;
+ spin_unlock_irqrestore(&iwpm_mapinfo_lock,
+ flags);
+ /* send the skb */
+ ret = send_nlmsg_done(skb, nl_client, iwpm_pid);
+ skb = NULL;
+ if (ret) {
+ err_str = "Unable to send map info";
+ goto send_mapping_info_exit;
+ }
+ if (skb_num == IWPM_MAPINFO_SKB_COUNT) {
+ ret = -ENOMEM;
+ err_str = "Insufficient skbs for map info";
+ goto send_mapping_info_exit;
+ }
+ skb = dev_alloc_skb(NLMSG_GOODSIZE);
+ if (!skb) {
+ ret = -ENOMEM;
+ err_str = "Unable to allocate skb";
+ goto send_mapping_info_exit;
+ }
+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ }
+ }
+ }
+send_mapping_info_unlock:
+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+send_mapping_info_exit:
+ if (ret) {
+ pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
+ if (skb)
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ send_nlmsg_done(skb, nl_client, iwpm_pid);
+ return send_mapinfo_num(mapping_num, nl_client, iwpm_pid);
+}
+
+int iwpm_mapinfo_available(void)
+{
+ unsigned long flags;
+ int full_bucket = 0, i = 0;
+
+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ if (iwpm_hash_bucket) {
+ for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ if (!hlist_empty(&iwpm_hash_bucket[i])) {
+ full_bucket = 1;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+ return full_bucket;
+}
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
new file mode 100644
index 000000000000..9777c869a140
--- /dev/null
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _IWPM_UTIL_H
+#define _IWPM_UTIL_H
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/jhash.h>
+#include <linux/kref.h>
+#include <net/netlink.h>
+#include <linux/errno.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
+
+#define IWPM_NL_RETRANS 3
+#define IWPM_NL_TIMEOUT (10*HZ)
+#define IWPM_MAPINFO_SKB_COUNT 20
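+/* upper bound on the number of skbs used per mapping info send */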
+
+#define IWPM_PID_UNDEFINED     (-1)
+#define IWPM_PID_UNAVAILABLE   (-2)
+
+struct iwpm_nlmsg_request {
+ struct list_head inprocess_list;
+ __u32 nlmsg_seq;
+ void *req_buffer;
+ u8 nl_client;
+ u8 request_done;
+ u16 err_code;
+ wait_queue_head_t waitq;
+ struct kref kref;
+};
+
+struct iwpm_mapping_info {
+ struct hlist_node hlist_node;
+ struct sockaddr_storage local_sockaddr;
+ struct sockaddr_storage mapped_sockaddr;
+ u8 nl_client;
+};
+
+struct iwpm_admin_data {
+ atomic_t refcount;
+ atomic_t nlmsg_seq;
+ int client_list[RDMA_NL_NUM_CLIENTS];
+ int reg_list[RDMA_NL_NUM_CLIENTS];
+};
+
+/**
+ * iwpm_get_nlmsg_request - Allocate and initialize netlink message request
+ * @nlmsg_seq: Sequence number of the netlink message
+ * @nl_client: The index of the netlink client
+ * @gfp: Indicates how the memory for the request should be allocated
+ *
+ * Returns the newly allocated netlink request object if successful,
+ * otherwise returns NULL
+ */
+struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
+ u8 nl_client, gfp_t gfp);
+
+/**
+ * iwpm_free_nlmsg_request - Deallocate netlink message request
+ * @kref: Holds reference of netlink message request
+ */
+void iwpm_free_nlmsg_request(struct kref *kref);
+
+/**
+ * iwpm_find_nlmsg_request - Find netlink message request in the request list
+ * @echo_seq: Sequence number of the netlink request to find
+ *
+ * Returns the found netlink message request,
+ * if not found, returns NULL
+ */
+struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq);
+
+/**
+ * iwpm_wait_complete_req - Block while servicing the netlink request
+ * @nlmsg_request: Netlink message request to service
+ *
+ * Wakes up after the request is completed or times out
+ * Returns 0 if the request completed without error
+ */
+int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
+
+/**
+ * iwpm_get_nlmsg_seq - Get the sequence number for a netlink
+ * message to send to the port mapper
+ *
+ * Returns the sequence number for the netlink message.
+ */
+int iwpm_get_nlmsg_seq(void);
+
+/**
+ * iwpm_valid_client - Check if the port mapper client is valid
+ * @nl_client: The index of the netlink client
+ *
+ * Valid clients need to call iwpm_init() before using
+ * the port mapper
+ */
+int iwpm_valid_client(u8 nl_client);
+
+/**
+ * iwpm_set_valid - Set the port mapper client to valid or not
+ * @nl_client: The index of the netlink client
+ * @valid: 1 if valid or 0 if invalid
+ */
+void iwpm_set_valid(u8 nl_client, int valid);
+
+/**
+ * iwpm_registered_client - Check if the port mapper client is registered
+ * @nl_client: The index of the netlink client
+ *
+ * Call iwpm_register_pid() to register a client
+ */
+int iwpm_registered_client(u8 nl_client);
+
+/**
+ * iwpm_set_registered - Set the port mapper client to registered or not
+ * @nl_client: The index of the netlink client
+ * @reg: 1 if registered or 0 if not
+ */
+void iwpm_set_registered(u8 nl_client, int reg);
+
+/**
+ * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
+ * a client to the user space port mapper
+ * @nl_client: The index of the netlink client
+ * @iwpm_pid: The pid of the user space port mapper
+ *
+ * Returns 0 if the mapping info records are sent successfully,
+ * otherwise returns a negative error code
+ */
+int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid);
+
+/**
+ * iwpm_mapinfo_available - Check if any mapping info records are available
+ * in the hash table
+ *
+ * Returns 1 if mapping information is available, otherwise returns 0
+ */
+int iwpm_mapinfo_available(void);
+
+/**
+ * iwpm_compare_sockaddr - Compare two sockaddr storage structs
+ *
+ * Returns 0 if they hold the same IP address and TCP port,
+ * otherwise returns 1
+ */
+int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
+ struct sockaddr_storage *b_sockaddr);
+
+/**
+ * iwpm_validate_nlmsg_attr - Check for NULL netlink attributes
+ * @nltb: Holds address of each netlink message attributes
+ * @nla_count: Number of netlink message attributes
+ *
+ * Returns error if any of the nla_count attributes is NULL
+ */
+static inline int iwpm_validate_nlmsg_attr(struct nlattr *nltb[],
+ int nla_count)
+{
+ int i;
+ for (i = 1; i < nla_count; i++) {
+ if (!nltb[i])
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * iwpm_create_nlmsg - Allocate skb and form a netlink message
+ * @nl_op: Netlink message opcode
+ * @nlh: Holds address of the netlink message header in skb
+ * @nl_client: The index of the netlink client
+ *
+ * Returns the newly allocated skb, or NULL if the tailroom of the skb
+ * is insufficient to store the message header and payload
+ */
+struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
+ int nl_client);
+
+/**
+ * iwpm_parse_nlmsg - Validate and parse the received netlink message
+ * @cb: Netlink callback structure
+ * @policy_max: Maximum attribute type to be expected
+ * @nlmsg_policy: Validation policy
+ * @nltb: Array to store policy_max parsed elements
+ * @msg_type: Type of netlink message
+ *
+ * Returns 0 on success or a negative error code
+ */
+int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
+ const struct nla_policy *nlmsg_policy,
+ struct nlattr *nltb[], const char *msg_type);
+
+/**
+ * iwpm_print_sockaddr - Print IPv4/IPv6 address and TCP port
+ * @sockaddr: Socket address to print
+ * @msg: Message to print
+ */
+void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
+#endif
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index a1e9cba84944..23dd5a5c7597 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -103,13 +103,13 @@ int ibnl_remove_client(int index)
EXPORT_SYMBOL(ibnl_remove_client);
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
- int len, int client, int op)
+ int len, int client, int op, int flags)
{
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
*nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
- len, NLM_F_MULTI);
+ len, flags);
if (!*nlh)
goto out_nlmsg_trim;
(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
@@ -172,6 +172,20 @@ static void ibnl_rcv(struct sk_buff *skb)
mutex_unlock(&ibnl_mutex);
}
+int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
+ __u32 pid)
+{
+ return nlmsg_unicast(nls, skb, pid);
+}
+EXPORT_SYMBOL(ibnl_unicast);
+
+int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
+ unsigned int group, gfp_t flags)
+{
+ return nlmsg_multicast(nls, skb, 0, group, flags);
+}
+EXPORT_SYMBOL(ibnl_multicast);
+
int __init ibnl_init(void)
{
struct netlink_kernel_cfg cfg = {
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f820958e4047..233eaf541f55 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -618,7 +618,7 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
- bool preload = gfp_mask & __GFP_WAIT;
+ bool preload = !!(gfp_mask & __GFP_WAIT);
unsigned long flags;
int ret, id;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7d3292c7b4b4..cbd0383f622e 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -429,15 +429,19 @@ static void ib_port_release(struct kobject *kobj)
struct attribute *a;
int i;
- for (i = 0; (a = p->gid_group.attrs[i]); ++i)
- kfree(a);
+ if (p->gid_group.attrs) {
+ for (i = 0; (a = p->gid_group.attrs[i]); ++i)
+ kfree(a);
- kfree(p->gid_group.attrs);
+ kfree(p->gid_group.attrs);
+ }
- for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
- kfree(a);
+ if (p->pkey_group.attrs) {
+ for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
+ kfree(a);
- kfree(p->pkey_group.attrs);
+ kfree(p->pkey_group.attrs);
+ }
kfree(p);
}
@@ -534,10 +538,12 @@ static int add_port(struct ib_device *device, int port_num,
p->port_num = port_num;
ret = kobject_init_and_add(&p->kobj, &port_type,
- kobject_get(device->ports_parent),
+ device->ports_parent,
"%d", port_num);
- if (ret)
- goto err_put;
+ if (ret) {
+ kfree(p);
+ return ret;
+ }
ret = sysfs_create_group(&p->kobj, &pma_group);
if (ret)
@@ -585,6 +591,7 @@ err_free_pkey:
kfree(p->pkey_group.attrs[i]);
kfree(p->pkey_group.attrs);
+ p->pkey_group.attrs = NULL;
err_remove_gid:
sysfs_remove_group(&p->kobj, &p->gid_group);
@@ -594,13 +601,13 @@ err_free_gid:
kfree(p->gid_group.attrs[i]);
kfree(p->gid_group.attrs);
+ p->gid_group.attrs = NULL;
err_remove_pma:
sysfs_remove_group(&p->kobj, &pma_group);
err_put:
- kobject_put(device->ports_parent);
- kfree(p);
+ kobject_put(&p->kobj);
return ret;
}
@@ -809,6 +816,22 @@ static struct attribute_group iw_stats_group = {
.attrs = iw_proto_stats_attrs,
};
+static void free_port_list_attributes(struct ib_device *device)
+{
+ struct kobject *p, *t;
+
+ list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ struct ib_port *port = container_of(p, struct ib_port, kobj);
+ list_del(&p->entry);
+ sysfs_remove_group(p, &pma_group);
+ sysfs_remove_group(p, &port->pkey_group);
+ sysfs_remove_group(p, &port->gid_group);
+ kobject_put(p);
+ }
+
+ kobject_put(device->ports_parent);
+}
+
int ib_device_register_sysfs(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *))
@@ -835,7 +858,7 @@ int ib_device_register_sysfs(struct ib_device *device,
}
device->ports_parent = kobject_create_and_add("ports",
- kobject_get(&class_dev->kobj));
+ &class_dev->kobj);
if (!device->ports_parent) {
ret = -ENOMEM;
goto err_put;
@@ -862,21 +885,7 @@ int ib_device_register_sysfs(struct ib_device *device,
return 0;
err_put:
- {
- struct kobject *p, *t;
- struct ib_port *port;
-
- list_for_each_entry_safe(p, t, &device->port_list, entry) {
- list_del(&p->entry);
- port = container_of(p, struct ib_port, kobj);
- sysfs_remove_group(p, &pma_group);
- sysfs_remove_group(p, &port->pkey_group);
- sysfs_remove_group(p, &port->gid_group);
- kobject_put(p);
- }
- }
-
- kobject_put(&class_dev->kobj);
+ free_port_list_attributes(device);
err_unregister:
device_unregister(class_dev);
@@ -887,22 +896,18 @@ err:
void ib_device_unregister_sysfs(struct ib_device *device)
{
- struct kobject *p, *t;
- struct ib_port *port;
-
/* Hold kobject until ib_dealloc_device() */
- kobject_get(&device->dev.kobj);
+ struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
+ int i;
- list_for_each_entry_safe(p, t, &device->port_list, entry) {
- list_del(&p->entry);
- port = container_of(p, struct ib_port, kobj);
- sysfs_remove_group(p, &pma_group);
- sysfs_remove_group(p, &port->pkey_group);
- sysfs_remove_group(p, &port->gid_group);
- kobject_put(p);
- }
+ if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats)
+ sysfs_remove_group(kobj_dev, &iw_stats_group);
+
+ free_port_list_attributes(device);
+
+ for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
+ device_remove_file(&device->dev, ib_class_attributes[i]);
- kobject_put(device->ports_parent);
device_unregister(&device->dev);
}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index f0d588f8859e..1acb99100556 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -98,7 +98,7 @@ struct ib_umad_port {
struct ib_umad_device {
int start_port, end_port;
- struct kref ref;
+ struct kobject kobj;
struct ib_umad_port port[0];
};
@@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
-static void ib_umad_release_dev(struct kref *ref)
+static void ib_umad_release_dev(struct kobject *kobj)
{
struct ib_umad_device *dev =
- container_of(ref, struct ib_umad_device, ref);
+ container_of(kobj, struct ib_umad_device, kobj);
kfree(dev);
}
+static struct kobj_type ib_umad_dev_ktype = {
+ .release = ib_umad_release_dev,
+};
+
static int hdr_size(struct ib_umad_file *file)
{
return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
@@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret;
+ int ret = -ENXIO;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
- if (port)
- kref_get(&port->umad_dev->ref);
- else
- return -ENXIO;
mutex_lock(&port->file_mutex);
- if (!port->ib_dev) {
- ret = -ENXIO;
+ if (!port->ib_dev)
goto out;
- }
+ ret = -ENOMEM;
file = kzalloc(sizeof *file, GFP_KERNEL);
- if (!file) {
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
- ret = -ENOMEM;
+ if (!file)
goto out;
- }
mutex_init(&file->mutex);
spin_lock_init(&file->send_lock);
@@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
ret = nonseekable_open(inode, filp);
+ if (ret) {
+ list_del(&file->port_list);
+ kfree(file);
+ goto out;
+ }
+
+ kobject_get(&port->umad_dev->kobj);
out:
mutex_unlock(&port->file_mutex);
@@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
mutex_unlock(&file->port->file_mutex);
kfree(file);
- kref_put(&dev->ref, ib_umad_release_dev);
+ kobject_put(&dev->kobj);
return 0;
}
@@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
int ret;
port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
- if (port)
- kref_get(&port->umad_dev->ref);
- else
- return -ENXIO;
if (filp->f_flags & O_NONBLOCK) {
if (down_trylock(&port->sm_sem)) {
@@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
}
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
- if (ret) {
- up(&port->sm_sem);
- goto fail;
- }
+ if (ret)
+ goto err_up_sem;
filp->private_data = port;
- return nonseekable_open(inode, filp);
+ ret = nonseekable_open(inode, filp);
+ if (ret)
+ goto err_clr_sm_cap;
+
+ kobject_get(&port->umad_dev->kobj);
+
+ return 0;
+
+err_clr_sm_cap:
+ swap(props.set_port_cap_mask, props.clr_port_cap_mask);
+ ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+
+err_up_sem:
+ up(&port->sm_sem);
fail:
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
return ret;
}
@@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
up(&port->sm_sem);
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
+ kobject_put(&port->umad_dev->kobj);
return ret;
}
@@ -995,6 +1004,7 @@ static int find_overflow_devnum(void)
}
static int ib_umad_init_port(struct ib_device *device, int port_num,
+ struct ib_umad_device *umad_dev,
struct ib_umad_port *port)
{
int devnum;
@@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
cdev_init(&port->cdev, &umad_fops);
port->cdev.owner = THIS_MODULE;
+ port->cdev.kobj.parent = &umad_dev->kobj;
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
if (cdev_add(&port->cdev, base, 1))
goto err_cdev;
@@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
+ port->sm_cdev.kobj.parent = &umad_dev->kobj;
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
if (cdev_add(&port->sm_cdev, base, 1))
goto err_sm_cdev;
@@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device)
if (!umad_dev)
return;
- kref_init(&umad_dev->ref);
+ kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
umad_dev->start_port = s;
umad_dev->end_port = e;
@@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device)
for (i = s; i <= e; ++i) {
umad_dev->port[i - s].umad_dev = umad_dev;
- if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
+ if (ib_umad_init_port(device, i, umad_dev,
+ &umad_dev->port[i - s]))
goto err;
}
@@ -1158,7 +1171,7 @@ err:
while (--i >= s)
ib_umad_kill_port(&umad_dev->port[i - s]);
- kref_put(&umad_dev->ref, ib_umad_release_dev);
+ kobject_put(&umad_dev->kobj);
}
static void ib_umad_remove_one(struct ib_device *device)
@@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device)
for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
ib_umad_kill_port(&umad_dev->port[i]);
- kref_put(&umad_dev->ref, ib_umad_release_dev);
+ kobject_put(&umad_dev->kobj);
}
static char *umad_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 92525f855d82..c2b89cc5dbca 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -48,7 +48,7 @@
#include "core_priv.h"
-int ib_rate_to_mult(enum ib_rate rate)
+__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
case IB_RATE_2_5_GBPS: return 1;
@@ -65,7 +65,7 @@ int ib_rate_to_mult(enum ib_rate rate)
}
EXPORT_SYMBOL(ib_rate_to_mult);
-enum ib_rate mult_to_ib_rate(int mult)
+__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
switch (mult) {
case 1: return IB_RATE_2_5_GBPS;
@@ -82,7 +82,7 @@ enum ib_rate mult_to_ib_rate(int mult)
}
EXPORT_SYMBOL(mult_to_ib_rate);
-int ib_rate_to_mbps(enum ib_rate rate)
+__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
switch (rate) {
case IB_RATE_2_5_GBPS: return 2500;
@@ -107,7 +107,7 @@ int ib_rate_to_mbps(enum ib_rate rate)
}
EXPORT_SYMBOL(ib_rate_to_mbps);
-enum rdma_transport_type
+__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
switch (node_type) {
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
new file mode 100644
index 000000000000..e900b03531a9
--- /dev/null
+++ b/drivers/infiniband/hw/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
+obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
+obj-$(CONFIG_INFINIBAND_QIB) += qib/
+obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
+obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
+obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
+obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
+obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
+obj-$(CONFIG_INFINIBAND_NES) += nes/
+obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
+obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index c3f5aca4ef00..de1c61b417d6 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -735,14 +735,12 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
- cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
+ tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
tpt.len = cpu_to_be32(len);
tpt.va_hi = cpu_to_be32((u32) (to >> 32));
tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
- cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
+ tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
}
err = cxio_hal_ctrl_qp_write_mem(rdev_p,
stag_idx +
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 095bb046e2c8..cb78b1e9bcd9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -418,6 +418,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
skb->priority = CPL_PRIORITY_DATA;
set_arp_failure_handler(skb, abort_arp_failure);
req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 1f863a96a480..5e153f6d4b48 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -47,6 +47,8 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
+
#include "iw_cxgb4.h"
static char *states[] = {
@@ -232,12 +234,16 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
- ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+ ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+ sizeof(struct iphdr) - sizeof(struct tcphdr);
ep->mss = ep->emss;
if (GET_TCPOPT_TSTAMP(opt))
ep->emss -= 12;
if (ep->emss < 128)
ep->emss = 128;
+ if (ep->emss & 7)
+ PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
ep->mss, ep->emss);
}
@@ -294,6 +300,12 @@ void _c4iw_free_ep(struct kref *kref)
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
}
+ if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
+ print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
+ iwpm_remove_mapinfo(&ep->com.local_addr,
+ &ep->com.mapped_local_addr);
+ iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
+ }
kfree(ep);
}
@@ -341,10 +353,7 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
static struct net_device *get_real_dev(struct net_device *egress_dev)
{
- struct net_device *phys_dev = egress_dev;
- if (egress_dev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(egress_dev);
- return phys_dev;
+ return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
@@ -468,7 +477,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
- flowc->mnemval[6].val = cpu_to_be32(snd_win);
+ flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
/* Pad WR to 16 byte boundary */
@@ -528,6 +537,49 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
+/*
+ * c4iw_form_pm_msg - Form a port mapper message with mapping info
+ */
+static void c4iw_form_pm_msg(struct c4iw_ep *ep,
+ struct iwpm_sa_data *pm_msg)
+{
+ memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
+}
+
+/*
+ * c4iw_form_reg_msg - Form a port mapper message with dev info
+ */
+static void c4iw_form_reg_msg(struct c4iw_dev *dev,
+ struct iwpm_dev_data *pm_msg)
+{
+ memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
+ memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
+ IWPM_IFNAME_SIZE);
+}
+
+static void c4iw_record_pm_msg(struct c4iw_ep *ep,
+ struct iwpm_sa_data *pm_msg)
+{
+ memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
+ sizeof(ep->com.mapped_local_addr));
+ memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
+ sizeof(ep->com.mapped_remote_addr));
+}
+
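+/*
+ * best_mtu - Pick the best-aligned MTU index, accounting for the
+ * IP/TCP headers and the TCP timestamp option when in use
+ */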
+static void best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx, int use_ts)
+{
+ unsigned short hdr_size = sizeof(struct iphdr) +
+ sizeof(struct tcphdr) +
+ (use_ts ? 12 : 0);
+ unsigned short data_size = mtu - hdr_size;
+
+ cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -546,10 +598,15 @@ static int send_connect(struct c4iw_ep *ep)
int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
sizeof(struct cpl_act_open_req6) :
sizeof(struct cpl_t5_act_open_req6);
- struct sockaddr_in *la = (struct sockaddr_in *)&ep->com.local_addr;
- struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
- struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
- struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+ struct sockaddr_in *la = (struct sockaddr_in *)
+ &ep->com.mapped_local_addr;
+ struct sockaddr_in *ra = (struct sockaddr_in *)
+ &ep->com.mapped_remote_addr;
+ struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
+ struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
+ &ep->com.mapped_remote_addr;
+ int win;
wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
roundup(sizev4, 16) :
@@ -565,8 +622,18 @@ static int send_connect(struct c4iw_ep *ep)
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps);
wscale = compute_wscale(rcv_win);
+
+ /*
+ * Specify the largest window that will fit in opt0. The
+ * remainder will be specified in the rx_data_ack.
+ */
+ win = ep->rcv_win >> 10;
+ if (win > RCV_BUFSIZ_MASK)
+ win = RCV_BUFSIZ_MASK;
+
opt0 = (nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
DELACK(1) |
@@ -577,7 +644,7 @@ static int send_connect(struct c4iw_ep *ep)
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win>>10);
+ RCV_BUFSIZ(win);
opt2 = RX_CHANNEL(0) |
CCTRL_ECN(enable_ecn) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -633,6 +700,13 @@ static int send_connect(struct c4iw_ep *ep)
req6->opt2 = cpu_to_be32(opt2);
}
} else {
+ u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+ if (peer2peer)
+ isn += 4;
+
if (ep->com.remote_addr.ss_family == AF_INET) {
t5_req = (struct cpl_t5_act_open_req *)
skb_put(skb, wrlen);
@@ -649,6 +723,9 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
+ t5_req->rsvd = cpu_to_be32(isn);
+ PDBG("%s snd_isn %u\n", __func__,
+ be32_to_cpu(t5_req->rsvd));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -672,6 +749,9 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
+ t5_req6->rsvd = cpu_to_be32(isn);
+ PDBG("%s snd_isn %u\n", __func__,
+ be32_to_cpu(t5_req6->rsvd));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
@@ -1145,6 +1225,14 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
return 0;
}
+ /*
+ * If we couldn't specify the entire rcv window at connection setup
+ * due to the limit in the number of bits in the RCV_BUFSIZ field,
+ * then add the overage in to the credits returned.
+ */
+ if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
+ credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+
req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
@@ -1618,6 +1706,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
unsigned int mtu_idx;
int wscale;
struct sockaddr_in *sin;
+ int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
@@ -1627,10 +1716,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
- sin = (struct sockaddr_in *)&ep->com.local_addr;
+ sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
req->le.lport = sin->sin_port;
req->le.u.ipv4.lip = sin->sin_addr.s_addr;
- sin = (struct sockaddr_in *)&ep->com.remote_addr;
+ sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
req->le.pport = sin->sin_port;
req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
@@ -1640,8 +1729,18 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
- cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps);
wscale = compute_wscale(rcv_win);
+
+ /*
+ * Specify the largest window that will fit in opt0. The
+ * remainder will be specified in the rx_data_ack.
+ */
+ win = ep->rcv_win >> 10;
+ if (win > RCV_BUFSIZ_MASK)
+ win = RCV_BUFSIZ_MASK;
+
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
@@ -1653,7 +1752,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win >> 10));
+ RCV_BUFSIZ(win));
req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL(0) |
@@ -1690,6 +1789,13 @@ static int is_neg_adv(unsigned int status)
status == CPL_ERR_KEEPALV_NEG_ADVICE;
}
+static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
+{
+ ep->snd_win = snd_win;
+ ep->rcv_win = rcv_win;
+ PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
+}
+
#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1738,6 +1844,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
ep->ctrlq_idx = cxgb4_port_idx(pdev);
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
+ set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
dev_put(pdev);
} else {
pdev = get_real_dev(n->dev);
@@ -1746,16 +1853,17 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
if (!ep->l2t)
goto out;
ep->mtu = dst_mtu(dst);
- ep->tx_chan = cxgb4_port_chan(n->dev);
- ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(n->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
step = cdev->rdev.lldi.nrxq /
cdev->rdev.lldi.nchan;
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(n->dev) * step];
+ cxgb4_port_idx(pdev) * step];
+ set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
if (clear_mpa_v1) {
ep->retry_with_mpa_v1 = 0;
@@ -1870,10 +1978,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct sockaddr_in6 *ra6;
ep = lookup_atid(t, atid);
- la = (struct sockaddr_in *)&ep->com.local_addr;
- ra = (struct sockaddr_in *)&ep->com.remote_addr;
- la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
- ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+ la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
+ ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
+ la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
@@ -1986,13 +2094,36 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
u64 opt0;
u32 opt2;
int wscale;
+ struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
+ int win;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
+
skb_get(skb);
- cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ rpl = cplhdr(skb);
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ skb_trim(skb, roundup(sizeof(*rpl5), 16));
+ rpl5 = (void *)rpl;
+ INIT_TP_WR(rpl5, ep->hwtid);
+ } else {
+ skb_trim(skb, sizeof(*rpl));
+ INIT_TP_WR(rpl, ep->hwtid);
+ }
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ ep->hwtid));
+
+ best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps && req->tcpopt.tstamp);
wscale = compute_wscale(rcv_win);
+
+ /*
+ * Specify the largest window that will fit in opt0. The
+ * remainder will be specified in the rx_data_ack.
+ */
+ win = ep->rcv_win >> 10;
+ if (win > RCV_BUFSIZ_MASK)
+ win = RCV_BUFSIZ_MASK;
opt0 = (nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
DELACK(1) |
@@ -2003,7 +2134,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos >> 2) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win>>10);
+ RCV_BUFSIZ(win);
opt2 = RX_CHANNEL(0) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -2023,14 +2154,18 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN(1);
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+ rpl5 = (void *)rpl;
+ memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+ if (peer2peer)
+ isn += 4;
+ rpl5->iss = cpu_to_be32(isn);
+ PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
}
- rpl = cplhdr(skb);
- INIT_TP_WR(rpl, ep->hwtid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
@@ -2095,6 +2230,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
int err;
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
+ unsigned short hdrs;
parent_ep = lookup_stid(t, stid);
if (!parent_ep) {
@@ -2152,8 +2288,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- if (peer_mss && child_ep->mtu > (peer_mss + 40))
- child_ep->mtu = peer_mss + 40;
+ hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
+ if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
+ child_ep->mtu = peer_mss + hdrs;
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
@@ -2730,13 +2868,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_ep *ep;
int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
- struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
- &cm_id->remote_addr;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_sa_data pm_msg;
__u8 *ra;
int iptype;
+ int iwpm_err = 0;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
@@ -2767,7 +2907,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!ep->com.qp) {
PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
- goto fail2;
+ goto fail1;
}
ref_qp(ep);
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
@@ -2780,10 +2920,50 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->atid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
err = -ENOMEM;
- goto fail2;
+ goto fail1;
}
insert_handle(dev, &dev->atid_idr, ep, ep->atid);
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ sizeof(ep->com.remote_addr));
+
+ /* No port mapper available, go with the specified peer information */
+ memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
+ sizeof(ep->com.mapped_local_addr));
+ memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
+ sizeof(ep->com.mapped_remote_addr));
+
+ c4iw_form_reg_msg(dev, &pm_reg_msg);
+ iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
+ if (iwpm_err) {
+ PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
+ __func__, iwpm_err);
+ }
+ if (iwpm_valid_pid() && !iwpm_err) {
+ c4iw_form_pm_msg(ep, &pm_msg);
+ iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
+ if (iwpm_err)
+ PDBG("%s: Port Mapper query fail (err = %d).\n",
+ __func__, iwpm_err);
+ else
+ c4iw_record_pm_msg(ep, &pm_msg);
+ }
+ if (iwpm_create_mapinfo(&ep->com.local_addr,
+ &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
+ iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
+ err = -ENOMEM;
+ goto fail1;
+ }
+ print_addr(&ep->com, __func__, "add_query/create_mapinfo");
+ set_bit(RELEASE_MAPINFO, &ep->com.flags);
+
+ laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
+ raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
+
if (cm_id->remote_addr.ss_family == AF_INET) {
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
@@ -2794,7 +2974,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail1;
}
/* find a route */
@@ -2814,7 +2994,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail1;
}
/* find a route */
@@ -2830,13 +3010,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
- goto fail3;
+ goto fail2;
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail4;
+ goto fail3;
}
PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
@@ -2845,10 +3025,6 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
state_set(&ep->com, CONNECTING);
ep->tos = 0;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
- sizeof(ep->com.remote_addr));
/* send connect request to rnic */
err = send_connect(ep);
@@ -2856,12 +3032,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto out;
cxgb4_l2t_release(ep->l2t);
-fail4:
- dst_release(ep->dst);
fail3:
+ dst_release(ep->dst);
+fail2:
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
+fail1:
cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
out:
@@ -2871,7 +3047,8 @@ out:
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
c4iw_init_wr_wait(&ep->com.wr_wait);
err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
@@ -2892,7 +3069,8 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
- struct sockaddr_in *sin = (struct sockaddr_in *)&ep->com.local_addr;
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &ep->com.mapped_local_addr;
if (dev->rdev.lldi.enable_fw_ofld_conn) {
do {
@@ -2927,6 +3105,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
+ struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_sa_data pm_msg;
+ int iwpm_err = 0;
might_sleep();
@@ -2961,6 +3142,37 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail2;
}
insert_handle(dev, &dev->stid_idr, ep, ep->stid);
+
+ /* No port mapper available, go with the specified info */
+ memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
+ sizeof(ep->com.mapped_local_addr));
+
+ c4iw_form_reg_msg(dev, &pm_reg_msg);
+ iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
+ if (iwpm_err) {
+ PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
+ __func__, iwpm_err);
+ }
+ if (iwpm_valid_pid() && !iwpm_err) {
+ memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
+ if (iwpm_err)
+ PDBG("%s: Port Mapper add mapping fail (err = %d).\n",
+ __func__, iwpm_err);
+ else
+ memcpy(&ep->com.mapped_local_addr,
+ &pm_msg.mapped_loc_addr,
+ sizeof(ep->com.mapped_local_addr));
+ }
+ if (iwpm_create_mapinfo(&ep->com.local_addr,
+ &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
+ err = -ENOMEM;
+ goto fail3;
+ }
+ print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
+
+ set_bit(RELEASE_MAPINFO, &ep->com.flags);
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
err = create_server4(dev, ep);
@@ -2970,6 +3182,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
+
+fail3:
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
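
The connect and listen paths above share one shape: mapped_local_addr/mapped_remote_addr start out as plain copies of the caller's addresses, a failed iwpm_register_pid() or add/query round trip is only logged and setup continues with the unmapped addresses, and only an iwpm_create_mapinfo() failure aborts. A minimal userspace sketch of that fallback shape, assuming hypothetical stand-ins (struct addr, query_port_mapper) rather than the real iwpm API:

#include <stdio.h>

struct addr {
        char host[32];
        int  port;
};

/* hypothetical stand-in for the iwpm add-and-query round trip */
static int query_port_mapper(const struct addr *in, struct addr *out)
{
        (void)in;
        (void)out;
        return -1;              /* pretend no port mapper is running */
}

int main(void)
{
        struct addr local = { "10.0.0.1", 4791 };
        struct addr mapped_local;

        /* no port mapper available: go with the specified information */
        mapped_local = local;

        if (query_port_mapper(&local, &mapped_local) == 0)
                printf("mapped, using %s:%d\n",
                       mapped_local.host, mapped_local.port);
        else
                printf("unmapped, using %s:%d\n",
                       mapped_local.host, mapped_local.port);
        return 0;
}
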
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cfaa56ada189..c04292c950f1 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -134,7 +134,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
V_FW_RI_RES_WR_IQANUS(0) |
V_FW_RI_RES_WR_IQANUD(1) |
F_FW_RI_RES_WR_IQANDST |
- V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+ V_FW_RI_RES_WR_IQANDSTINDEX(
+ rdev->lldi.ciq_ids[cq->vector]));
res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
F_FW_RI_RES_WR_IQDROPRSS |
V_FW_RI_RES_WR_IQPCIECH(2) |
@@ -870,6 +871,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
rhp = to_c4iw_dev(ibdev);
+ if (vector >= rhp->rdev.lldi.nciq)
+ return ERR_PTR(-EINVAL);
+
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
@@ -915,6 +919,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
}
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
+ chp->cq.vector = vector;
ret = create_cq(&rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -940,7 +945,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
if (!mm2)
goto err4;
- memset(&uresp, 0, sizeof(uresp));
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
uresp.size = chp->cq.size;
@@ -951,7 +955,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
uresp.gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
spin_unlock(&ucontext->mmap_lock);
- ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ ret = ib_copy_to_udata(udata, &uresp,
+ sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
goto err5;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index f4fa50a609e2..dd93aadc996e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -77,6 +77,16 @@ struct c4iw_debugfs_data {
int pos;
};
+/* registered cxgb4 netlink callbacks */
+static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
+ [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+ [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+ [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+ [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+};
+
static int count_idrs(int id, void *p, void *data)
{
int *countp = data;
@@ -113,35 +123,49 @@ static int dump_qp(int id, void *p, void *data)
&qp->ep->com.local_addr;
struct sockaddr_in *rsin = (struct sockaddr_in *)
&qp->ep->com.remote_addr;
+ struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
+ &qp->ep->com.mapped_local_addr;
+ struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
+ &qp->ep->com.mapped_remote_addr;
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
"onchip %u ep tid %u state %u "
- "%pI4:%u->%pI4:%u\n",
+ "%pI4:%u/%u->%pI4:%u/%u\n",
qp->wq.sq.qid, qp->wq.rq.qid,
(int)qp->attr.state,
qp->wq.sq.flags & T4_SQ_ONCHIP,
qp->ep->hwtid, (int)qp->ep->com.state,
&lsin->sin_addr, ntohs(lsin->sin_port),
- &rsin->sin_addr, ntohs(rsin->sin_port));
+ ntohs(mapped_lsin->sin_port),
+ &rsin->sin_addr, ntohs(rsin->sin_port),
+ ntohs(mapped_rsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
&qp->ep->com.local_addr;
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
&qp->ep->com.remote_addr;
+ struct sockaddr_in6 *mapped_lsin6 =
+ (struct sockaddr_in6 *)
+ &qp->ep->com.mapped_local_addr;
+ struct sockaddr_in6 *mapped_rsin6 =
+ (struct sockaddr_in6 *)
+ &qp->ep->com.mapped_remote_addr;
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
"onchip %u ep tid %u state %u "
- "%pI6:%u->%pI6:%u\n",
+ "%pI6:%u/%u->%pI6:%u/%u\n",
qp->wq.sq.qid, qp->wq.rq.qid,
(int)qp->attr.state,
qp->wq.sq.flags & T4_SQ_ONCHIP,
qp->ep->hwtid, (int)qp->ep->com.state,
&lsin6->sin6_addr,
ntohs(lsin6->sin6_port),
+ ntohs(mapped_lsin6->sin6_port),
&rsin6->sin6_addr,
- ntohs(rsin6->sin6_port));
+ ntohs(rsin6->sin6_port),
+ ntohs(mapped_rsin6->sin6_port));
}
} else
cc = snprintf(qpd->buf + qpd->pos, space,
@@ -386,31 +410,43 @@ static int dump_ep(int id, void *p, void *data)
&ep->com.local_addr;
struct sockaddr_in *rsin = (struct sockaddr_in *)
&ep->com.remote_addr;
+ struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
+ &ep->com.mapped_local_addr;
+ struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
+ &ep->com.mapped_remote_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
- "%pI4:%d <-> %pI4:%d\n",
+ "%pI4:%d/%d <-> %pI4:%d/%d\n",
ep, ep->com.cm_id, ep->com.qp,
(int)ep->com.state, ep->com.flags,
ep->com.history, ep->hwtid, ep->atid,
&lsin->sin_addr, ntohs(lsin->sin_port),
- &rsin->sin_addr, ntohs(rsin->sin_port));
+ ntohs(mapped_lsin->sin_port),
+ &rsin->sin_addr, ntohs(rsin->sin_port),
+ ntohs(mapped_rsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
&ep->com.local_addr;
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
&ep->com.remote_addr;
+ struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
+ struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
+ &ep->com.mapped_remote_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
- "%pI6:%d <-> %pI6:%d\n",
+ "%pI6:%d/%d <-> %pI6:%d/%d\n",
ep, ep->com.cm_id, ep->com.qp,
(int)ep->com.state, ep->com.flags,
ep->com.history, ep->hwtid, ep->atid,
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
- &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
+ ntohs(mapped_lsin6->sin6_port),
+ &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
+ ntohs(mapped_rsin6->sin6_port));
}
if (cc < space)
epd->pos += cc;
@@ -431,23 +467,29 @@ static int dump_listen_ep(int id, void *p, void *data)
if (ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
&ep->com.local_addr;
+ struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
+ &ep->com.mapped_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
- "backlog %d %pI4:%d\n",
+ "backlog %d %pI4:%d/%d\n",
ep, ep->com.cm_id, (int)ep->com.state,
ep->com.flags, ep->stid, ep->backlog,
- &lsin->sin_addr, ntohs(lsin->sin_port));
+ &lsin->sin_addr, ntohs(lsin->sin_port),
+ ntohs(mapped_lsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
&ep->com.local_addr;
+ struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
- "backlog %d %pI6:%d\n",
+ "backlog %d %pI6:%d/%d\n",
ep, ep->com.cm_id, (int)ep->com.state,
ep->com.flags, ep->stid, ep->backlog,
- &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
+ &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
+ ntohs(mapped_lsin6->sin6_port));
}
if (cc < space)
epd->pos += cc;
@@ -687,6 +729,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
if (ctx->dev->rdev.oc_mw_kva)
iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev);
+ iwpm_exit(RDMA_NL_C4IW);
ctx->dev = NULL;
}
@@ -736,6 +779,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pci_resource_len(devp->rdev.lldi.pdev, 2));
if (!devp->rdev.bar2_kva) {
pr_err(MOD "Unable to ioremap BAR2\n");
+ ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL);
}
} else if (ocqp_supported(infop)) {
@@ -747,6 +791,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.lldi.vr->ocq.size);
if (!devp->rdev.oc_mw_kva) {
pr_err(MOD "Unable to ioremap onchip mem\n");
+ ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL);
}
}
@@ -780,6 +825,14 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
c4iw_debugfs_root);
setup_debugfs(devp);
}
+
+ ret = iwpm_init(RDMA_NL_C4IW);
+ if (ret) {
+ pr_err("port mapper initialization failed with %d\n", ret);
+ ib_dealloc_device(&devp->ibdev);
+ return ERR_PTR(ret);
+ }
+
return devp;
}
@@ -1274,6 +1327,11 @@ static int __init c4iw_init_module(void)
printk(KERN_WARNING MOD
"could not create debugfs entry, continuing\n");
+ if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
+ c4iw_nl_cb_table))
+ pr_err("%s[%u]: Failed to add netlink callback\n"
+ , __func__, __LINE__);
+
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1291,6 +1349,7 @@ static void __exit c4iw_exit_module(void)
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+ ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7474b490760a..125bc5d1e175 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -52,6 +52,8 @@
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
+#include <rdma/rdma_netlink.h>
+#include <rdma/iw_portmap.h>
#include "cxgb4.h"
#include "cxgb4_uld.h"
@@ -728,6 +730,7 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
TIMEOUT = 4,
QP_REFERENCED = 5,
+ RELEASE_MAPINFO = 6,
};
enum c4iw_ep_history {
@@ -764,6 +767,8 @@ struct c4iw_ep_common {
struct mutex mutex;
struct sockaddr_storage local_addr;
struct sockaddr_storage remote_addr;
+ struct sockaddr_storage mapped_local_addr;
+ struct sockaddr_storage mapped_remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
unsigned long history;
@@ -805,7 +810,48 @@ struct c4iw_ep {
u8 retry_with_mpa_v1;
u8 tried_with_mpa_v1;
unsigned int retry_count;
-};
+ int snd_win;
+ int rcv_win;
+};
+
+static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
+ const char *msg)
+{
+
+#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
+#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
+#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
+#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)
+
+ if (c4iw_debug) {
+ switch (epc->local_addr.ss_family) {
+ case AF_INET:
+ PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
+ func, msg, SINA(&epc->local_addr),
+ SINP(&epc->local_addr),
+ SINP(&epc->mapped_local_addr),
+ SINA(&epc->remote_addr),
+ SINP(&epc->remote_addr),
+ SINP(&epc->mapped_remote_addr));
+ break;
+ case AF_INET6:
+ PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
+ func, msg, SIN6A(&epc->local_addr),
+ SIN6P(&epc->local_addr),
+ SIN6P(&epc->mapped_local_addr),
+ SIN6A(&epc->remote_addr),
+ SIN6P(&epc->remote_addr),
+ SIN6P(&epc->mapped_remote_addr));
+ break;
+ default:
+ break;
+ }
+ }
+#undef SINA
+#undef SINP
+#undef SIN6A
+#undef SIN6P
+}
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index a94a3e12c349..b1d305338de6 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -122,7 +122,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
- if (udata->outlen < sizeof(uresp)) {
+ if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++)
pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
@@ -140,7 +140,8 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
context->key += PAGE_SIZE;
spin_unlock(&context->mmap_lock);
- ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ ret = ib_copy_to_udata(udata, &uresp,
+ sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
goto err_mm;
@@ -499,7 +500,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.node_type = RDMA_NODE_RNIC;
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
- dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
dev->ibdev.query_device = c4iw_query_device;
dev->ibdev.query_port = c4iw_query_port;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 2178f3198410..68b0a6bf4eb0 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -542,6 +542,7 @@ struct t4_cq {
size_t memsize;
__be64 bits_type_ts;
u32 cqid;
+ int vector;
u16 size; /* including status page */
u16 cidx;
u16 sw_pidx;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 6121ca08fe58..91289a051af9 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,7 @@ enum { /* TCP congestion control algorithms */
#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+#define CONG_CNTRL_VALID (1 << 18)
#define T5_OPT_2_VALID (1 << 31)
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
index 11ccd276e5d9..cbd0ce170728 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -48,6 +48,7 @@ struct c4iw_create_cq_resp {
__u32 cqid;
__u32 size;
__u32 qid_mask;
+ __u32 reserved; /* explicit padding (optional for i386) */
};
@@ -74,5 +75,6 @@ struct c4iw_create_qp_resp {
struct c4iw_alloc_ucontext_resp {
__u64 status_page_key;
__u32 status_page_size;
+ __u32 reserved; /* explicit padding (optional for i386) */
};
#endif
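
The reserved fields added above (and the matching sizeof(uresp) - sizeof(uresp.reserved) copies in cq.c and provider.c) address an ABI wrinkle: a struct whose members end on a __u32 after a __u64 gets 4 bytes of compiler tail padding on 64-bit builds but none on i386, so sizeof() differs between the two ABIs unless the padding is spelled out. A standalone sketch of the layout issue, with illustrative field names rather than the exact c4iw structs:

#include <stdint.h>
#include <stdio.h>

struct resp_implicit {          /* tail padding left to the compiler */
        uint64_t key;
        uint32_t cqid;
        uint32_t size;
        uint32_t qid_mask;
};

struct resp_explicit {          /* padding spelled out: same size on
                                   32- and 64-bit builds */
        uint64_t key;
        uint32_t cqid;
        uint32_t size;
        uint32_t qid_mask;
        uint32_t reserved;
};

int main(void)
{
        /* 24 on x86-64 (4 bytes of tail padding), 20 on i386 */
        printf("implicit: %zu\n", sizeof(struct resp_implicit));
        /* 24 on both */
        printf("explicit: %zu\n", sizeof(struct resp_explicit));
        return 0;
}
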
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index e2f9a51f4a38..45802e97332e 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
ret = -EFAULT;
goto bail;
}
+ dp.len = odp.len;
+ dp.unit = odp.unit;
+ dp.data = odp.data;
+ dp.pbc_wd = 0;
} else {
ret = -EINVAL;
goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 26dfbc8ee0f1..01ba792791a0 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -70,7 +70,7 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
int i;
if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
- dd->ipath_lastcancel > jiffies) {
+ time_after(dd->ipath_lastcancel, jiffies)) {
__IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
"SendbufErrs %lx %lx", sbuf[0],
sbuf[1]);
@@ -755,7 +755,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
/* likely due to cancel; so suppress message unless verbose */
if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
- dd->ipath_lastcancel > jiffies) {
+ time_after(dd->ipath_lastcancel, jiffies)) {
/* armlaunch takes precedence; it often causes both. */
ipath_cdbg(VERBOSE,
"Suppressed %s error (%llx) after sendbuf cancel\n",
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 98ac18ec977e..17a517766ad2 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -247,7 +247,7 @@ static void sdma_abort_task(unsigned long opaque)
/* ipath_sdma_abort() is done, waiting for interrupt */
if (status == IPATH_SDMA_ABORT_DISARMED) {
- if (jiffies < dd->ipath_sdma_abort_intr_timeout)
+ if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
goto resched_noprint;
/* give up, intr got lost somewhere */
ipath_dbg("give up waiting for SDMADISABLED intr\n");
@@ -341,7 +341,7 @@ resched:
* JAG - this is bad to just have default be a loop without
* state change
*/
- if (jiffies > dd->ipath_sdma_abort_jiffies) {
+ if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
ipath_dbg("looping with status 0x%08lx\n",
dd->ipath_sdma_status);
dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
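
All three ipath hunks replace raw comparisons of jiffies values with time_after()/time_before(), which survive counter wraparound by doing the comparison as a signed subtraction; a plain a > b gives the wrong answer once jiffies wraps past zero. A userspace check of the trick, with my_time_after mirroring the kernel's time_after() definition:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* same definition as the kernel's time_after(a, b) */
static int my_time_after(jiffies_t a, jiffies_t b)
{
        return (long)(b - a) < 0;       /* true if a is after b */
}

int main(void)
{
        jiffies_t b = (jiffies_t)-10;   /* 10 ticks before wraparound */
        jiffies_t a = b + 20;           /* 20 ticks later, wrapped to 10 */

        printf("naive a > b:      %d\n", a > b);                /* 0: wrong */
        printf("time_after(a, b): %d\n", my_time_after(a, b));  /* 1 */
        return 0;
}
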
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 170dca608042..2d8c3397774f 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -73,7 +73,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
{
struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
struct mlx4_dev *dev = ibdev->dev;
- int is_mcast;
+ int is_mcast = 0;
struct in6_addr in6;
u16 vlan_tag;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5f640814cc81..1066eec854a9 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf);
+ PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
if (err)
goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
if (err)
goto err_mtt;
@@ -209,7 +209,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_db_alloc(dev->dev, &cq->db, 1);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
if (err)
goto err_cq;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index fd36ec672632..287ad0564acd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -478,10 +478,6 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
return -EAGAIN;
- /* QP0 forwarding only for Dom0 */
- if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
- return -EINVAL;
-
if (!dest_qpt)
tun_qp = &tun_ctx->qp[0];
else
@@ -667,6 +663,21 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
}
/* Class-specific handling */
switch (mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ /* 255 indicates the dom0 */
+ if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
+ if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
+ return -EPERM;
+ /* for a VF, drop unsolicited MADs */
+ if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
+ mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
+ slave, mad->mad_hdr.mgmt_class,
+ mad->mad_hdr.method);
+ return -EINVAL;
+ }
+ }
+ break;
case IB_MGMT_CLASS_SUBN_ADM:
if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
(struct ib_sa_mad *) mad))
@@ -1165,10 +1176,6 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
return -EAGAIN;
- /* QP0 forwarding only for Dom0 */
- if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
- return -EINVAL;
-
if (dest_qpt == IB_QPT_SMI) {
src_qpnum = 0;
sqp = &sqp_ctx->qp[0];
@@ -1285,11 +1292,6 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
"belongs to another slave\n", wc->src_qp);
return;
}
- if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
- mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
- "non-master trying to send QP0 packets\n", wc->src_qp);
- return;
- }
/* Map transaction ID */
ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
@@ -1317,6 +1319,12 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
/* Class-specific handling */
switch (tunnel->mad.mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ if (slave != mlx4_master_func_num(dev->dev) &&
+ !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
+ return;
+ break;
case IB_MGMT_CLASS_SUBN_ADM:
if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
(struct ib_sa_mad *) &tunnel->mad))
@@ -1749,9 +1757,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
return -EEXIST;
ctx->state = DEMUX_PV_STATE_STARTING;
- /* have QP0 only on port owner, and only if link layer is IB */
- if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
- rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
+ /* have QP0 only if link layer is IB */
+ if (rdma_port_get_link_layer(ibdev, ctx->port) ==
+ IB_LINK_LAYER_INFINIBAND)
ctx->has_smi = 1;
if (ctx->has_smi) {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 199c7896f081..0f7027e7db13 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -545,12 +545,11 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return 0;
}
-static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
- u32 cap_mask)
+static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
+ u32 cap_mask)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
- u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
if (IS_ERR(mailbox))
@@ -564,8 +563,8 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
}
- err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
@@ -574,11 +573,20 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
struct ib_port_modify *props)
{
+ struct mlx4_ib_dev *mdev = to_mdev(ibdev);
+ u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
struct ib_port_attr attr;
u32 cap_mask;
int err;
- mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
+ /* return OK if this is RoCE. CM calls ib_modify_port() regardless
+ * of whether port link layer is ETH or IB. For ETH ports, qkey
+ * violations and port capabilities are not meaningful.
+ */
+ if (is_eth)
+ return 0;
+
+ mutex_lock(&mdev->cap_mask_mutex);
err = mlx4_ib_query_port(ibdev, port, &attr);
if (err)
@@ -587,9 +595,9 @@ static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
~props->clr_port_cap_mask;
- err = mlx4_SET_PORT(to_mdev(ibdev), port,
- !!(mask & IB_PORT_RESET_QKEY_CNTR),
- cap_mask);
+ err = mlx4_ib_SET_PORT(mdev, port,
+ !!(mask & IB_PORT_RESET_QKEY_CNTR),
+ cap_mask);
out:
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 66b0b7dbd9f4..369da3ca5d64 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -156,6 +156,7 @@ enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
+ MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
MLX4_IB_SRIOV_SQP = 1 << 31,
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index dc57482ae7af..67780452f0cf 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -608,9 +608,20 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return !attr->srq;
}
+static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
+{
+ int i;
+ for (i = 0; i < dev->caps.num_ports; i++) {
+ if (qpn == dev->caps.qp0_proxy[i])
+ return !!dev->caps.qp0_qkey[i];
+ }
+ return 0;
+}
+
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp)
+ struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
+ gfp_t gfp)
{
int qpn;
int err;
@@ -625,10 +636,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
!(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
if (init_attr->qp_type == IB_QPT_GSI)
qp_type = MLX4_IB_QPT_PROXY_GSI;
- else if (mlx4_is_master(dev->dev))
- qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
- else
- qp_type = MLX4_IB_QPT_PROXY_SMI;
+ else {
+ if (mlx4_is_master(dev->dev) ||
+ qp0_enabled_vf(dev->dev, sqpn))
+ qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
+ else
+ qp_type = MLX4_IB_QPT_PROXY_SMI;
+ }
}
qpn = sqpn;
/* add extra sg entry for tunneling */
@@ -643,7 +657,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
return -EINVAL;
if (tnl_init->proxy_qp_type == IB_QPT_GSI)
qp_type = MLX4_IB_QPT_TUN_GSI;
- else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
+ else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
+ mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
+ tnl_init->port))
qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
else
qp_type = MLX4_IB_QPT_TUN_SMI;
@@ -658,14 +674,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
(qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
+ sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else {
- qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
+ qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
if (!qp)
return -ENOMEM;
qp->pri.vid = 0xFFFF;
@@ -748,14 +764,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (qp_has_rq(init_attr)) {
- err = mlx4_db_alloc(dev->dev, &qp->db, 0);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
if (err)
goto err;
*qp->db.db = 0;
}
- if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
err = -ENOMEM;
goto err_db;
}
@@ -765,13 +781,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
if (err)
goto err_mtt;
- qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
- qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);
-
+ qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
+ qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
@@ -801,7 +816,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_proxy;
}
- err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
if (err)
goto err_qpn;
@@ -1040,7 +1055,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct mlx4_ib_qp *qp = NULL;
int err;
u16 xrcdn = 0;
+ gfp_t gfp;
+ gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
+ GFP_NOIO : GFP_KERNEL;
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
* and only for kernel UD QPs.
@@ -1049,7 +1067,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
- MLX4_IB_QP_NETIF))
+ MLX4_IB_QP_NETIF |
+ MLX4_IB_QP_CREATE_USE_GFP_NOIO))
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1059,7 +1078,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
if (init_attr->create_flags &&
(udata ||
- ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
+ ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | MLX4_IB_QP_CREATE_USE_GFP_NOIO)) &&
init_attr->qp_type != IB_QPT_UD) ||
((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
init_attr->qp_type > IB_QPT_GSI)))
@@ -1079,7 +1098,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof *qp, GFP_KERNEL);
+ qp = kzalloc(sizeof *qp, gfp);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
@@ -1088,7 +1107,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_UD:
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
- udata, 0, &qp);
+ udata, 0, &qp, gfp);
if (err)
return ERR_PTR(err);
@@ -1106,7 +1125,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
get_sqp_num(to_mdev(pd->device), init_attr),
- &qp);
+ &qp, gfp);
if (err)
return ERR_PTR(err);
@@ -1938,6 +1957,19 @@ out:
return err;
}
+static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
+{
+ int i;
+ for (i = 0; i < dev->caps.num_ports; i++) {
+ if (qpn == dev->caps.qp0_proxy[i] ||
+ qpn == dev->caps.qp0_tunnel[i]) {
+ *qkey = dev->caps.qp0_qkey[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
struct ib_send_wr *wr,
void *wqe, unsigned *mlx_seg_len)
@@ -1995,8 +2027,13 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
- if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
- return -EINVAL;
+ if (mlx4_is_master(mdev->dev)) {
+ if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+ return -EINVAL;
+ } else {
+ if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+ return -EINVAL;
+ }
sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
@@ -2378,7 +2415,8 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
struct mlx4_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr, enum ib_qp_type qpt)
+ struct ib_send_wr *wr,
+ enum mlx4_ib_qp_type qpt)
{
union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
struct mlx4_av sqp_av = {0};
@@ -2391,8 +2429,10 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
cpu_to_be32(0xf0000000);
memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
- /* This function used only for sending on QP1 proxies */
- dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+ if (qpt == MLX4_IB_QPT_PROXY_GSI)
+ dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+ else
+ dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
/* Use QKEY from the QP context, which is set by master */
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
@@ -2687,11 +2727,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case MLX4_IB_QPT_PROXY_SMI_OWNER:
- if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
- err = -ENOSYS;
- *bad_wr = wr;
- goto out;
- }
err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
if (unlikely(err)) {
*bad_wr = wr;
@@ -2708,16 +2743,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += seglen / 16;
break;
case MLX4_IB_QPT_PROXY_SMI:
- /* don't allow QP0 sends on guests */
- err = -ENOSYS;
- *bad_wr = wr;
- goto out;
case MLX4_IB_QPT_PROXY_GSI:
/* If we are tunneling special qps, this is a UD qp.
* In this case we first add a UD segment targeting
* the tunnel qp, and then add a header with address
* information */
- set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
+ set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr,
+ qp->mlx4_ib_qp_type);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
build_tunnel_header(wr, wqe, &seglen);
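
The qp.c hunks above plumb a single gfp_t from mlx4_ib_create_qp() down through every allocation the QP needs (kzalloc, mlx4_db_alloc, mlx4_buf_alloc, mlx4_buf_write_mtt, mlx4_qp_alloc), so a caller on a block-I/O path can pass IB_QP_CREATE_USE_GFP_NOIO and keep memory reclaim from recursing into I/O. A userspace analogue of the plumbing pattern, with hypothetical names (alloc_ctx, buf_alloc, create_qp):

#include <stdio.h>
#include <stdlib.h>

enum alloc_ctx { CTX_KERNEL, CTX_NOIO };        /* stands in for gfp_t */

static void *buf_alloc(size_t size, enum alloc_ctx ctx)
{
        /* a real GFP_NOIO allocator would refuse to start I/O during
         * reclaim; here we only record which context was requested */
        printf("alloc %zu bytes (%s)\n", size,
               ctx == CTX_NOIO ? "GFP_NOIO" : "GFP_KERNEL");
        return malloc(size);
}

static int create_qp(int on_io_path)
{
        /* chosen once at the top, threaded through every helper */
        enum alloc_ctx ctx = on_io_path ? CTX_NOIO : CTX_KERNEL;
        void *db = buf_alloc(64, ctx);
        void *wrid = buf_alloc(256, ctx);

        if (!db || !wrid) {
                free(db);
                free(wrid);
                return -1;
        }
        free(db);
        free(wrid);
        return 0;
}

int main(void)
{
        return create_qp(1) ? 1 : 0;
}
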
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 60c5fb025fc7..62d9285300af 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -134,13 +134,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
- err = mlx4_db_alloc(dev->dev, &srq->db, 0);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
if (err)
goto err_srq;
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
+ GFP_KERNEL)) {
err = -ENOMEM;
goto err_db;
}
@@ -165,7 +166,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
if (err)
goto err_mtt;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 5a38e43eca65..cb4c66e723b5 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -389,8 +389,10 @@ struct mlx4_port {
struct mlx4_ib_dev *dev;
struct attribute_group pkey_group;
struct attribute_group gid_group;
- u8 port_num;
+ struct device_attribute enable_smi_admin;
+ struct device_attribute smi_enabled;
int slave;
+ u8 port_num;
};
@@ -558,6 +560,101 @@ err:
return NULL;
}
+static ssize_t sysfs_show_smi_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx4_port *p =
+ container_of(attr, struct mlx4_port, smi_enabled);
+ ssize_t len = 0;
+
+ if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num))
+ len = sprintf(buf, "%d\n", 1);
+ else
+ len = sprintf(buf, "%d\n", 0);
+
+ return len;
+}
+
+static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_port *p =
+ container_of(attr, struct mlx4_port, enable_smi_admin);
+ ssize_t len = 0;
+
+ if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num))
+ len = sprintf(buf, "%d\n", 1);
+ else
+ len = sprintf(buf, "%d\n", 0);
+
+ return len;
+}
+
+static ssize_t sysfs_store_enable_smi_admin(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlx4_port *p =
+ container_of(attr, struct mlx4_port, enable_smi_admin);
+ int enable;
+
+ if (sscanf(buf, "%i", &enable) != 1 ||
+ enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (mlx4_vf_set_enable_smi_admin(p->dev->dev, p->slave, p->port_num, enable))
+ return -EINVAL;
+ return count;
+}
+
+static int add_vf_smi_entries(struct mlx4_port *p)
+{
+ int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
+ IB_LINK_LAYER_ETHERNET;
+ int ret;
+
+ /* do not display entries if eth transport, or if master */
+ if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
+ return 0;
+
+ sysfs_attr_init(&p->smi_enabled.attr);
+ p->smi_enabled.show = sysfs_show_smi_enabled;
+ p->smi_enabled.store = NULL;
+ p->smi_enabled.attr.name = "smi_enabled";
+ p->smi_enabled.attr.mode = 0444;
+ ret = sysfs_create_file(&p->kobj, &p->smi_enabled.attr);
+ if (ret) {
+ pr_err("failed to create smi_enabled\n");
+ return ret;
+ }
+
+ sysfs_attr_init(&p->enable_smi_admin.attr);
+ p->enable_smi_admin.show = sysfs_show_enable_smi_admin;
+ p->enable_smi_admin.store = sysfs_store_enable_smi_admin;
+ p->enable_smi_admin.attr.name = "enable_smi_admin";
+ p->enable_smi_admin.attr.mode = 0644;
+ ret = sysfs_create_file(&p->kobj, &p->enable_smi_admin.attr);
+ if (ret) {
+ pr_err("failed to create enable_smi_admin\n");
+ sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
+ return ret;
+ }
+ return 0;
+}
+
+static void remove_vf_smi_entries(struct mlx4_port *p)
+{
+ int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
+ IB_LINK_LAYER_ETHERNET;
+
+ if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
+ return;
+
+ sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
+ sysfs_remove_file(&p->kobj, &p->enable_smi_admin.attr);
+}
+
static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
{
struct mlx4_port *p;
@@ -602,6 +699,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
if (ret)
goto err_free_gid;
+ ret = add_vf_smi_entries(p);
+ if (ret)
+ goto err_free_gid;
+
list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]);
return 0;
@@ -669,6 +770,7 @@ err_add:
mport = container_of(p, struct mlx4_port, kobj);
sysfs_remove_group(p, &mport->pkey_group);
sysfs_remove_group(p, &mport->gid_group);
+ remove_vf_smi_entries(mport);
kobject_put(p);
}
kobject_put(dev->dev_ports_parent[slave]);
@@ -713,6 +815,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
port = container_of(p, struct mlx4_port, kobj);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
+ remove_vf_smi_entries(port);
kobject_put(p);
kobject_put(device->dev_ports_parent[slave]);
}
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 62bb6b49dc1d..8ae4f896cb41 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -32,6 +32,7 @@
#include <linux/kref.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "user.h"
@@ -602,14 +603,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd;
+ size_t ucmdlen;
int page_shift;
int npages;
int ncont;
int err;
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+ ucmdlen =
+ (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
+ sizeof(ucmd)) ? (sizeof(ucmd) -
+ sizeof(ucmd.reserved)) : sizeof(ucmd);
+
+ if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
+ if (ucmdlen == sizeof(ucmd) &&
+ ucmd.reserved != 0)
+ return -EINVAL;
+
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 50541586e0a6..f2ccf1a5a291 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -264,8 +264,6 @@ struct mlx5_ib_mr {
__be64 *pas;
dma_addr_t dma;
int npages;
- struct completion done;
- enum ib_wc_status status;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
struct mlx5_core_sig_ctx *sig;
@@ -277,6 +275,17 @@ struct mlx5_ib_fast_reg_page_list {
dma_addr_t map;
};
+struct mlx5_ib_umr_context {
+ enum ib_wc_status status;
+ struct completion done;
+};
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+ context->status = -1;
+ init_completion(&context->done);
+}
+
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
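
mlx5_ib_umr_context pulls the completion and status out of struct mlx5_ib_mr and onto each caller's stack (see the wr_id changes in mr.c below), so concurrent UMR posts each wait on their own context instead of racing on one per-MR completion; the old "we serialize polls" comment disappears for the same reason. A userspace analogue using POSIX threads, with illustrative names (the kernel uses struct completion, not a condvar):

#include <pthread.h>
#include <stdio.h>

struct umr_context {                    /* mirrors mlx5_ib_umr_context */
        int status;                     /* -1 = still pending */
        pthread_mutex_t lock;
        pthread_cond_t done;
};

static void umr_context_init(struct umr_context *ctx)
{
        ctx->status = -1;
        pthread_mutex_init(&ctx->lock, NULL);
        pthread_cond_init(&ctx->done, NULL);
}

/* what the CQ handler does with the context found via wc.wr_id */
static void umr_complete(struct umr_context *ctx, int status)
{
        pthread_mutex_lock(&ctx->lock);
        ctx->status = status;
        pthread_cond_signal(&ctx->done);
        pthread_mutex_unlock(&ctx->lock);
}

static int umr_wait(struct umr_context *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        while (ctx->status == -1)
                pthread_cond_wait(&ctx->done, &ctx->lock);
        pthread_mutex_unlock(&ctx->lock);
        return ctx->status;
}

int main(void)
{
        struct umr_context ctx;         /* per operation, on the stack */

        umr_context_init(&ctx);
        umr_complete(&ctx, 0);          /* normally from the CQ handler */
        printf("status %d\n", umr_wait(&ctx));
        return 0;
}
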
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 81392b26d078..afa873bd028e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,6 +73,8 @@ static void reg_mr_callback(int status, void *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
+ struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+ int err;
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
@@ -107,6 +109,13 @@ static void reg_mr_callback(int status, void *context)
ent->cur++;
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
+
+ write_lock_irqsave(&table->lock, flags);
+ err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
+ &mr->mmr);
+ if (err)
+ pr_err("Error inserting to mr tree. 0x%x\n", -err);
+ write_unlock_irqrestore(&table->lock, flags);
}
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
@@ -699,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
- struct mlx5_ib_mr *mr;
+ struct mlx5_ib_umr_context *context;
struct ib_wc wc;
int err;
@@ -712,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
if (err == 0)
break;
- mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
- mr->status = wc.status;
- complete(&mr->done);
+ context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
+ context->status = wc.status;
+ complete(&context->done);
}
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
@@ -726,11 +735,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct device *ddev = dev->ib_dev.dma_device;
struct umr_common *umrc = &dev->umrc;
+ struct mlx5_ib_umr_context umr_context;
struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
int size = sizeof(u64) * npages;
- int err;
+ int err = 0;
int i;
for (i = 0; i < 1; i++) {
@@ -751,7 +761,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
if (!mr->pas) {
err = -ENOMEM;
- goto error;
+ goto free_mr;
}
mlx5_ib_populate_pas(dev, umem, page_shift,
@@ -760,44 +770,46 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
DMA_TO_DEVICE);
if (dma_mapping_error(ddev, mr->dma)) {
- kfree(mr->pas);
err = -ENOMEM;
- goto error;
+ goto free_pas;
}
memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)mr;
+ wr.wr_id = (u64)(unsigned long)&umr_context;
prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
- /* We serialize polls so one process does not kidnap another's
- * completion. This is not a problem since wr is completed in
- * around 1 usec
- */
+ mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
- init_completion(&mr->done);
err = ib_post_send(umrc->qp, &wr, &bad);
if (err) {
mlx5_ib_warn(dev, "post send failed, err %d\n", err);
- up(&umrc->sem);
- goto error;
+ goto unmap_dma;
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_warn(dev, "reg umr failed\n");
+ err = -EFAULT;
+ }
}
- wait_for_completion(&mr->done);
- up(&umrc->sem);
+ mr->mmr.iova = virt_addr;
+ mr->mmr.size = len;
+ mr->mmr.pd = to_mpd(pd)->pdn;
+
+unmap_dma:
+ up(&umrc->sem);
dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+
+free_pas:
kfree(mr->pas);
- if (mr->status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed\n");
- err = -EFAULT;
- goto error;
+free_mr:
+ if (err) {
+ free_cached_mr(dev, mr);
+ return ERR_PTR(err);
}
return mr;
-
-error:
- free_cached_mr(dev, mr);
- return ERR_PTR(err);
}
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
@@ -926,24 +938,26 @@ error:
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct umr_common *umrc = &dev->umrc;
+ struct mlx5_ib_umr_context umr_context;
struct ib_send_wr wr, *bad;
int err;
memset(&wr, 0, sizeof(wr));
- wr.wr_id = (u64)(unsigned long)mr;
+ wr.wr_id = (u64)(unsigned long)&umr_context;
prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
+ mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
- init_completion(&mr->done);
err = ib_post_send(umrc->qp, &wr, &bad);
if (err) {
up(&umrc->sem);
mlx5_ib_dbg(dev, "err %d\n", err);
goto error;
+ } else {
+ wait_for_completion(&umr_context.done);
+ up(&umrc->sem);
}
- wait_for_completion(&mr->done);
- up(&umrc->sem);
- if (mr->status != IB_WC_SUCCESS) {
+ if (umr_context.status != IB_WC_SUCCESS) {
mlx5_ib_warn(dev, "unreg umr failed\n");
err = -EFAULT;
goto error;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dc930ed21eca..d13ddf1c0033 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -574,6 +574,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
+ qp->rq.offset = 0;
+ qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+ qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+
err = set_user_buf_size(dev, qp, &ucmd);
if (err)
goto err_uuar;
@@ -2078,6 +2082,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
struct ib_sig_domain *wire = &sig_attrs->wire;
int ret, selector;
+ memset(bsf, 0, sizeof(*bsf));
switch (sig_attrs->mem.sig_type) {
case IB_SIG_TYPE_T10_DIF:
if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
@@ -2090,9 +2095,11 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
/* Same block structure */
basic->bsf_size_sbs = 1 << 4;
if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
- basic->wire.copy_byte_mask = 0xff;
- else
- basic->wire.copy_byte_mask = 0x3f;
+ basic->wire.copy_byte_mask |= 0xc0;
+ if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+ basic->wire.copy_byte_mask |= 0x30;
+ if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+ basic->wire.copy_byte_mask |= 0x0f;
} else
basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
@@ -2131,9 +2138,13 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
int ret;
int wqe_size;
- if (!wr->wr.sig_handover.prot) {
+ if (!wr->wr.sig_handover.prot ||
+ (data_key == wr->wr.sig_handover.prot->lkey &&
+ data_va == wr->wr.sig_handover.prot->addr &&
+ data_len == wr->wr.sig_handover.prot->length)) {
/**
* Source domain doesn't contain signature information
+ * or data and protection are interleaved in memory.
* So need construct:
* ------------------
* | data_klm |
@@ -2187,23 +2198,13 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
data_sentry->bcount = cpu_to_be16(block_size);
data_sentry->key = cpu_to_be32(data_key);
data_sentry->va = cpu_to_be64(data_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+
prot_sentry->bcount = cpu_to_be16(prot_size);
prot_sentry->key = cpu_to_be32(prot_key);
+ prot_sentry->va = cpu_to_be64(prot_va);
+ prot_sentry->stride = cpu_to_be16(prot_size);
- if (prot_key == data_key && prot_va == data_va) {
- /**
- * The data and protection are interleaved
- * in a single memory region
- **/
- prot_sentry->va = cpu_to_be64(data_va + block_size);
- prot_sentry->stride = cpu_to_be16(block_size + prot_size);
- data_sentry->stride = prot_sentry->stride;
- } else {
- /* The data and protection are two different buffers */
- prot_sentry->va = cpu_to_be64(prot_va);
- data_sentry->stride = cpu_to_be16(block_size);
- prot_sentry->stride = cpu_to_be16(prot_size);
- }
wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
sizeof(*prot_sentry), 64);
}
@@ -2275,7 +2276,10 @@ static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
/* length of the protected region, data + protection */
region_len = wr->sg_list->length;
- if (wr->wr.sig_handover.prot)
+ if (wr->wr.sig_handover.prot &&
+ (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
+ wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
+ wr->wr.sig_handover.prot->length != wr->sg_list->length))
region_len += wr->wr.sig_handover.prot->length;
/**
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 210b3eaf188a..384af6dec5eb 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "user.h"
@@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd;
+ size_t ucmdlen;
int err;
int npages;
int page_shift;
int ncont;
u32 offset;
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ucmdlen =
+ (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
+ sizeof(ucmd)) ? (sizeof(ucmd) -
+ sizeof(ucmd.reserved)) : sizeof(ucmd);
+
+ if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_dbg(dev, "failed copy udata\n");
return -EFAULT;
}
+
+ if (ucmdlen == sizeof(ucmd) &&
+ ucmd.reserved != 0)
+ return -EINVAL;
+
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 0f4f8e42a17f..d0ba264ac1ed 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -91,6 +91,7 @@ struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__u32 cqe_size;
+ __u32 reserved; /* explicit padding (optional on i386) */
};
struct mlx5_ib_create_cq_resp {
@@ -109,6 +110,7 @@ struct mlx5_ib_create_srq {
__u64 buf_addr;
__u64 db_addr;
__u32 flags;
+ __u32 reserved; /* explicit padding (optional on i386) */
};
struct mlx5_ib_create_srq_resp {
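
create_cq_user() and create_srq_user() above apply the same compatibility recipe: work out how much of the command an older userspace actually sent, copy only that much, and reject a full-size command whose new reserved field is nonzero so the bits remain claimable later. A standalone sketch of that shape, with illustrative struct and function names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct create_cq_cmd {
        uint64_t buf_addr;
        uint64_t db_addr;
        uint32_t cqe_size;
        uint32_t reserved;              /* new in this ABI revision */
};

static int parse_cmd(const void *udata, size_t inlen,
                     struct create_cq_cmd *cmd)
{
        /* old userspace sends sizeof(*cmd) - sizeof(reserved) bytes */
        size_t len = inlen < sizeof(*cmd) ?
                     sizeof(*cmd) - sizeof(cmd->reserved) : sizeof(*cmd);

        memset(cmd, 0, sizeof(*cmd));
        memcpy(cmd, udata, len);        /* stands in for ib_copy_from_udata */

        if (len == sizeof(*cmd) && cmd->reserved != 0)
                return -1;              /* -EINVAL in the hunks above */
        return 0;
}

int main(void)
{
        struct create_cq_cmd in = { .cqe_size = 64 };
        struct create_cq_cmd bad = { .cqe_size = 64, .reserved = 1 };
        struct create_cq_cmd out;

        printf("old-size ok:  %d\n", parse_cmd(&in, 20, &out) == 0);
        printf("full-size ok: %d\n", parse_cmd(&in, sizeof(in), &out) == 0);
        printf("bad rejected: %d\n", parse_cmd(&bad, sizeof(bad), &out) != 0);
        return 0;
}
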
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 353c7b05a90a..3b2a6dc8ea99 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -68,7 +68,6 @@ MODULE_VERSION(DRV_VERSION);
int max_mtu = 9000;
int interrupt_mod_interval = 0;
-
/* Interoperability */
int mpa_version = 1;
module_param(mpa_version, int, 0644);
@@ -112,6 +111,16 @@ static struct pci_device_id nes_pci_table[] = {
MODULE_DEVICE_TABLE(pci, nes_pci_table);
+/* registered nes netlink callbacks */
+static struct ibnl_client_cbs nes_nl_cb_table[] = {
+ [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+ [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+ [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+ [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+};
+
static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
static int nes_net_event(struct notifier_block *, unsigned long, void *);
static int nes_notifiers_registered;
@@ -672,6 +681,17 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
}
nes_notifiers_registered++;
+ if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table))
+ printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n",
+ __func__, __LINE__);
+
+ ret = iwpm_init(RDMA_NL_NES);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: port mapper initialization failed\n",
+ pci_name(pcidev));
+ goto bail7;
+ }
+
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
@@ -710,6 +730,7 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
nesdev->netdev_count, nesdev->nesadapter->netdev_count);
+ ibnl_remove_client(RDMA_NL_NES);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
@@ -773,6 +794,8 @@ static void nes_remove(struct pci_dev *pcidev)
nesdev->nesadapter->netdev_count--;
}
}
+ ibnl_remove_client(RDMA_NL_NES);
+ iwpm_exit(RDMA_NL_NES);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
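
nes_probe() above treats netlink-callback registration as best-effort (log and continue) but makes iwpm_init() failure fatal, unwinding through the bail7 label; nes_remove() undoes both. A reduced sketch of that pairing, with hypothetical stand-in helpers in place of ibnl_add_client()/iwpm_init() and their counterparts:

#include <stdio.h>

/* hypothetical stand-ins; the real calls live in nes_probe()/nes_remove() */
static int register_netlink_client(void) { return 0; }
static void unregister_netlink_client(void) { }
static int init_port_mapper(void) { return 0; }
static void exit_port_mapper(void) { }

static int example_probe(void)
{
        int ret;

        if (register_netlink_client())
                fprintf(stderr, "failed to add netlink callbacks\n");

        ret = init_port_mapper();
        if (ret) {
                /* mirror of the goto bail7 unwind in nes_probe() */
                unregister_netlink_client();
                return ret;
        }
        return 0;
}

static void example_remove(void)
{
        /* teardown undoes setup, as in nes_remove() */
        unregister_netlink_client();
        exit_port_mapper();
}
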
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 33cc58941a3e..bd9d132f11c7 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -51,6 +51,8 @@
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
+#include <rdma/rdma_netlink.h>
+#include <rdma/iw_portmap.h>
#define NES_SEND_FIRST_WRITE
@@ -130,6 +132,7 @@
#define NES_DBG_IW_TX 0x00040000
#define NES_DBG_SHUTDOWN 0x00080000
#define NES_DBG_PAU 0x00100000
+#define NES_DBG_NLMSG 0x00200000
#define NES_DBG_RSVD1 0x10000000
#define NES_DBG_RSVD2 0x20000000
#define NES_DBG_RSVD3 0x40000000
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfa9df484505..6f09a72e78d7 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -59,6 +59,7 @@
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/tcp.h>
+#include <linux/fcntl.h>
#include "nes.h"
@@ -166,7 +167,6 @@ int nes_rem_ref_cm_node(struct nes_cm_node *cm_node)
{
return rem_ref_cm_node(cm_node->cm_core, cm_node);
}
-
/**
* create_event
*/
@@ -482,11 +482,11 @@ static void form_cm_frame(struct sk_buff *skb,
iph->ttl = 0x40;
iph->protocol = 0x06; /* IPPROTO_TCP */
- iph->saddr = htonl(cm_node->loc_addr);
- iph->daddr = htonl(cm_node->rem_addr);
+ iph->saddr = htonl(cm_node->mapped_loc_addr);
+ iph->daddr = htonl(cm_node->mapped_rem_addr);
- tcph->source = htons(cm_node->loc_port);
- tcph->dest = htons(cm_node->rem_port);
+ tcph->source = htons(cm_node->mapped_loc_port);
+ tcph->dest = htons(cm_node->mapped_rem_port);
tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
if (flags & SET_ACK) {
@@ -525,6 +525,100 @@ static void form_cm_frame(struct sk_buff *skb,
cm_packets_created++;
}
+/*
+ * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct
+ */
+static void nes_create_sockaddr(__be32 ip_addr, __be16 port,
+ struct sockaddr_storage *addr)
+{
+ struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr;
+ nes_sockaddr->sin_family = AF_INET;
+ memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32));
+ nes_sockaddr->sin_port = port;
+}
+
+/*
+ * nes_create_mapinfo - Create a mapinfo object in the port mapper database
+ */
+static int nes_create_mapinfo(struct nes_cm_info *cm_info)
+{
+ struct sockaddr_storage local_sockaddr;
+ struct sockaddr_storage mapped_sockaddr;
+
+ nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
+ &local_sockaddr);
+ nes_create_sockaddr(htonl(cm_info->mapped_loc_addr),
+ htons(cm_info->mapped_loc_port), &mapped_sockaddr);
+
+ return iwpm_create_mapinfo(&local_sockaddr,
+ &mapped_sockaddr, RDMA_NL_NES);
+}
+
+/*
+ * nes_remove_mapinfo - Remove a mapinfo object from the port mapper database
+ * and send a remove mapping op message to
+ * the userspace port mapper
+ */
+static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port,
+ u32 mapped_loc_addr, u16 mapped_loc_port)
+{
+ struct sockaddr_storage local_sockaddr;
+ struct sockaddr_storage mapped_sockaddr;
+
+ nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr);
+ nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port),
+ &mapped_sockaddr);
+
+ iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr);
+ return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES);
+}
+
+/*
+ * nes_form_pm_msg - Form a port mapper message with mapping info
+ */
+static void nes_form_pm_msg(struct nes_cm_info *cm_info,
+ struct iwpm_sa_data *pm_msg)
+{
+ nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
+ &pm_msg->loc_addr);
+ nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port),
+ &pm_msg->rem_addr);
+}
+
+/*
+ * nes_form_reg_msg - Form a port mapper message with dev info
+ */
+static void nes_form_reg_msg(struct nes_vnic *nesvnic,
+ struct iwpm_dev_data *pm_msg)
+{
+ memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name,
+ IWPM_DEVNAME_SIZE);
+ memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
+}
+
+/*
+ * nes_record_pm_msg - Save the received mapping info
+ */
+static void nes_record_pm_msg(struct nes_cm_info *cm_info,
+ struct iwpm_sa_data *pm_msg)
+{
+ struct sockaddr_in *mapped_loc_addr =
+ (struct sockaddr_in *)&pm_msg->mapped_loc_addr;
+ struct sockaddr_in *mapped_rem_addr =
+ (struct sockaddr_in *)&pm_msg->mapped_rem_addr;
+
+ if (mapped_loc_addr->sin_family == AF_INET) {
+ cm_info->mapped_loc_addr =
+ ntohl(mapped_loc_addr->sin_addr.s_addr);
+ cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);
+ }
+ if (mapped_rem_addr->sin_family == AF_INET) {
+ cm_info->mapped_rem_addr =
+ ntohl(mapped_rem_addr->sin_addr.s_addr);
+ cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);
+ }
+}
+
/**
* print_core - dump a cm core
*/
@@ -1147,8 +1241,11 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
loc_addr, loc_port,
cm_node->rem_addr, cm_node->rem_port,
rem_addr, rem_port);
- if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&
- (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
+ if ((cm_node->mapped_loc_addr == loc_addr) &&
+ (cm_node->mapped_loc_port == loc_port) &&
+ (cm_node->mapped_rem_addr == rem_addr) &&
+ (cm_node->mapped_rem_port == rem_port)) {
+
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
return cm_node;
@@ -1165,18 +1262,28 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
* find_listener - find a cm node listening on this addr-port pair
*/
static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
- nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
+ nes_addr_t dst_addr, u16 dst_port,
+ enum nes_cm_listener_state listener_state, int local)
{
unsigned long flags;
struct nes_cm_listener *listen_node;
+ nes_addr_t listen_addr;
+ u16 listen_port;
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
+ if (local) {
+ listen_addr = listen_node->loc_addr;
+ listen_port = listen_node->loc_port;
+ } else {
+ listen_addr = listen_node->mapped_loc_addr;
+ listen_port = listen_node->mapped_loc_port;
+ }
/* compare node pair, return node handle if a match */
- if (((listen_node->loc_addr == dst_addr) ||
- listen_node->loc_addr == 0x00000000) &&
- (listen_node->loc_port == dst_port) &&
+ if (((listen_addr == dst_addr) ||
+ listen_addr == 0x00000000) &&
+ (listen_port == dst_port) &&
(listener_state & listen_node->listener_state)) {
atomic_inc(&listen_node->ref_count);
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
@@ -1189,7 +1296,6 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
return NULL;
}
-
/**
* add_hte_node - add a cm node to the hash table
*/
@@ -1310,9 +1416,20 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- if (listener->nesvnic)
- nes_manage_apbvt(listener->nesvnic, listener->loc_port,
- PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
+ if (listener->nesvnic) {
+ nes_manage_apbvt(listener->nesvnic,
+ listener->mapped_loc_port,
+ PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
+
+ nes_remove_mapinfo(listener->loc_addr,
+ listener->loc_port,
+ listener->mapped_loc_addr,
+ listener->mapped_loc_port);
+ nes_debug(NES_DBG_NLMSG,
+ "Delete APBVT mapped_loc_port = %04X\n",
+ listener->mapped_loc_port);
+ }
nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
@@ -1454,6 +1571,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
+ cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
+ cm_node->mapped_rem_addr = cm_info->mapped_rem_addr;
+ cm_node->mapped_loc_port = cm_info->mapped_loc_port;
+ cm_node->mapped_rem_port = cm_info->mapped_rem_port;
+
cm_node->mpa_frame_rev = mpa_version;
cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
cm_node->mpav2_ird_ord = 0;
@@ -1500,8 +1622,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loopbackpartner = NULL;
/* get the mac addr for the remote node */
- oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
- arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
+ oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr,
+ NULL, NES_ARP_RESOLVE);
+ arpindex = nes_addr_resolve_neigh(nesvnic,
+ cm_node->mapped_rem_addr, oldarpindex);
if (arpindex < 0) {
kfree(cm_node);
return NULL;
@@ -1563,11 +1687,14 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
} else {
if (cm_node->apbvt_set && cm_node->nesvnic) {
- nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
- PCI_FUNC(
- cm_node->nesvnic->nesdev->pcidev->devfn),
+ nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port,
+ PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
}
+ nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n",
+ cm_node->mapped_loc_port);
+ nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port,
+ cm_node->mapped_loc_addr, cm_node->mapped_loc_port);
}
atomic_dec(&cm_core->node_cnt);
@@ -2235,17 +2362,21 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
* mini_cm_listen - create a listen node with params
*/
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
struct nes_cm_listener *listener;
+ struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_sa_data pm_msg;
unsigned long flags;
+ int iwpm_err = 0;
nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
cm_info->loc_addr, cm_info->loc_port);
/* cannot have multiple matching listeners */
- listener = find_listener(cm_core, htonl(cm_info->loc_addr),
- htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
+ listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,
+ NES_CM_LISTENER_EITHER_STATE, 1);
+
if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
/* find automatically incs ref count ??? */
atomic_dec(&listener->ref_count);
@@ -2254,6 +2385,22 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
}
if (!listener) {
+ nes_form_reg_msg(nesvnic, &pm_reg_msg);
+ iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
+ if (iwpm_err) {
+ nes_debug(NES_DBG_NLMSG,
+ "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
+ }
+ if (iwpm_valid_pid() && !iwpm_err) {
+ nes_form_pm_msg(cm_info, &pm_msg);
+ iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES);
+ if (iwpm_err)
+ nes_debug(NES_DBG_NLMSG,
+ "Port Mapper query fail (err = %d).\n", iwpm_err);
+ else
+ nes_record_pm_msg(cm_info, &pm_msg);
+ }
+
/* create a CM listen node (1/2 node to compare incoming traffic to) */
listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
if (!listener) {
@@ -2261,8 +2408,10 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
return NULL;
}
- listener->loc_addr = htonl(cm_info->loc_addr);
- listener->loc_port = htons(cm_info->loc_port);
+ listener->loc_addr = cm_info->loc_addr;
+ listener->loc_port = cm_info->loc_port;
+ listener->mapped_loc_addr = cm_info->mapped_loc_addr;
+ listener->mapped_loc_port = cm_info->mapped_loc_port;
listener->reused_node = 0;
atomic_set(&listener->ref_count, 1);
@@ -2324,14 +2473,18 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (cm_info->loc_addr == cm_info->rem_addr) {
loopbackremotelistener = find_listener(cm_core,
- ntohl(nesvnic->local_ipaddr), cm_node->rem_port,
- NES_CM_LISTENER_ACTIVE_STATE);
+ cm_node->mapped_loc_addr, cm_node->mapped_rem_port,
+ NES_CM_LISTENER_ACTIVE_STATE, 0);
if (loopbackremotelistener == NULL) {
create_event(cm_node, NES_CM_EVENT_ABORTED);
} else {
loopback_cm_info = *cm_info;
loopback_cm_info.loc_port = cm_info->rem_port;
loopback_cm_info.rem_port = cm_info->loc_port;
+ loopback_cm_info.mapped_loc_port =
+ cm_info->mapped_rem_port;
+ loopback_cm_info.mapped_rem_port =
+ cm_info->mapped_loc_port;
loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
loopbackremotenode = make_cm_node(cm_core, nesvnic,
&loopback_cm_info, loopbackremotelistener);
@@ -2560,6 +2713,12 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
nfo.rem_addr = ntohl(iph->saddr);
nfo.rem_port = ntohs(tcph->source);
+ /* If a port mapper is available, these hold the mapped address info */
+ nfo.mapped_loc_addr = ntohl(iph->daddr);
+ nfo.mapped_loc_port = ntohs(tcph->dest);
+ nfo.mapped_rem_addr = ntohl(iph->saddr);
+ nfo.mapped_rem_port = ntohs(tcph->source);
+
tmp_daddr = cpu_to_be32(iph->daddr);
tmp_saddr = cpu_to_be32(iph->saddr);
@@ -2568,8 +2727,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
do {
cm_node = find_node(cm_core,
- nfo.rem_port, nfo.rem_addr,
- nfo.loc_port, nfo.loc_addr);
+ nfo.mapped_rem_port, nfo.mapped_rem_addr,
+ nfo.mapped_loc_port, nfo.mapped_loc_addr);
if (!cm_node) {
/* Only type of packet accepted are for */
@@ -2578,9 +2737,9 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
skb_handled = 0;
break;
}
- listener = find_listener(cm_core, nfo.loc_addr,
- nfo.loc_port,
- NES_CM_LISTENER_ACTIVE_STATE);
+ listener = find_listener(cm_core, nfo.mapped_loc_addr,
+ nfo.mapped_loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE, 0);
if (!listener) {
nfo.cm_id = NULL;
nfo.conn_type = 0;
@@ -3184,10 +3343,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_cm_init_tsa_conn(nesqp, cm_node);
- nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));
- nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));
+ nesqp->nesqp_context->tcpPorts[0] =
+ cpu_to_le16(cm_node->mapped_loc_port);
+ nesqp->nesqp_context->tcpPorts[1] =
+ cpu_to_le16(cm_node->mapped_rem_port);
- nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3211,9 +3372,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = raddr->sin_addr.s_addr;
- nes_quad.TcpPorts[0] = raddr->sin_port;
- nes_quad.TcpPorts[1] = laddr->sin_port;
+ nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
+ nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
+ nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3315,6 +3476,9 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int apbvt_set = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_sa_data pm_msg;
+ int iwpm_err = 0;
if (cm_id->remote_addr.ss_family != AF_INET)
return -ENOSYS;
@@ -3352,20 +3516,44 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
conn_param->private_data_len);
+ /* set up the connection params for the node */
+ cm_info.loc_addr = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_addr = ntohl(raddr->sin_addr.s_addr);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.cm_id = cm_id;
+ cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+
+ /* No port mapper available, go with the specified peer information */
+ cm_info.mapped_loc_addr = cm_info.loc_addr;
+ cm_info.mapped_loc_port = cm_info.loc_port;
+ cm_info.mapped_rem_addr = cm_info.rem_addr;
+ cm_info.mapped_rem_port = cm_info.rem_port;
+
+ nes_form_reg_msg(nesvnic, &pm_reg_msg);
+ iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
+ if (iwpm_err) {
+ nes_debug(NES_DBG_NLMSG,
+ "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
+ }
+ if (iwpm_valid_pid() && !iwpm_err) {
+ nes_form_pm_msg(&cm_info, &pm_msg);
+ iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES);
+ if (iwpm_err)
+ nes_debug(NES_DBG_NLMSG,
+ "Port Mapper query fail (err = %d).\n", iwpm_err);
+ else
+ nes_record_pm_msg(&cm_info, &pm_msg);
+ }
+
if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
- nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
- PCI_FUNC(nesdev->pcidev->devfn),
- NES_MANAGE_APBVT_ADD);
+ nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
apbvt_set = 1;
}
- /* set up the connection params for the node */
- cm_info.loc_addr = htonl(laddr->sin_addr.s_addr);
- cm_info.loc_port = htons(laddr->sin_port);
- cm_info.rem_addr = htonl(raddr->sin_addr.s_addr);
- cm_info.rem_port = htons(raddr->sin_port);
- cm_info.cm_id = cm_id;
- cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+ if (nes_create_mapinfo(&cm_info))
+ return -ENOMEM;
cm_id->add_ref(cm_id);
@@ -3375,10 +3563,14 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info);
if (!cm_node) {
if (apbvt_set)
- nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
+ nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
PCI_FUNC(nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
+ nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n",
+ cm_info.mapped_loc_port);
+ nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port,
+ cm_info.mapped_loc_addr, cm_info.mapped_loc_port);
cm_id->rem_ref(cm_id);
return -ENOMEM;
}
@@ -3424,13 +3616,16 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
nesvnic->local_ipaddr, laddr->sin_addr.s_addr);
/* setup listen params in our api call struct */
- cm_info.loc_addr = nesvnic->local_ipaddr;
- cm_info.loc_port = laddr->sin_port;
+ cm_info.loc_addr = ntohl(nesvnic->local_ipaddr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
cm_info.backlog = backlog;
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+ /* No port mapper available, go with the specified info */
+ cm_info.mapped_loc_addr = cm_info.loc_addr;
+ cm_info.mapped_loc_port = cm_info.loc_port;
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
@@ -3442,7 +3637,10 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = cm_node;
if (!cm_node->reused_node) {
- err = nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
+ if (nes_create_mapinfo(&cm_info))
+ return -ENOMEM;
+
+ err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port,
PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_ADD);
if (err) {
@@ -3567,9 +3765,11 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_cm_init_tsa_conn(nesqp, cm_node);
/* set the QP tsa context */
- nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));
- nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));
- nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));
+ nesqp->nesqp_context->tcpPorts[0] =
+ cpu_to_le16(cm_node->mapped_loc_port);
+ nesqp->nesqp_context->tcpPorts[1] =
+ cpu_to_le16(cm_node->mapped_rem_port);
+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3599,9 +3799,9 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = raddr->sin_addr.s_addr;
- nes_quad.TcpPorts[0] = raddr->sin_port;
- nes_quad.TcpPorts[1] = laddr->sin_port;
+ nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
+ nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
+ nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3629,7 +3829,7 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.ird = cm_node->ird_size;
cm_event.ord = cm_node->ord_size;
- cm_event_laddr->sin_addr.s_addr = event->cm_info.rem_addr;
+ cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 522c99cd07c4..f522cf639789 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -293,8 +293,8 @@ struct nes_cm_listener {
struct list_head list;
struct nes_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
- nes_addr_t loc_addr;
- u16 loc_port;
+ nes_addr_t loc_addr, mapped_loc_addr;
+ u16 loc_port, mapped_loc_port;
struct iw_cm_id *cm_id;
enum nes_cm_conn_type conn_type;
atomic_t ref_count;
@@ -308,7 +308,9 @@ struct nes_cm_listener {
/* per connection node and node state information */
struct nes_cm_node {
nes_addr_t loc_addr, rem_addr;
+ nes_addr_t mapped_loc_addr, mapped_rem_addr;
u16 loc_port, rem_port;
+ u16 mapped_loc_port, mapped_rem_port;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
@@ -364,6 +366,10 @@ struct nes_cm_info {
u16 rem_port;
nes_addr_t loc_addr;
nes_addr_t rem_addr;
+ u16 mapped_loc_port;
+ u16 mapped_rem_port;
+ nes_addr_t mapped_loc_addr;
+ nes_addr_t mapped_rem_addr;
enum nes_cm_conn_type conn_type;
int backlog;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 6c54106f5e64..41a9aec9998d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -510,16 +510,9 @@ exit:
return status;
}
-static int ocrdma_debugfs_open(struct inode *inode, struct file *file)
-{
- if (inode->i_private)
- file->private_data = inode->i_private;
- return 0;
-}
-
static const struct file_operations ocrdma_dbg_ops = {
.owner = THIS_MODULE,
- .open = ocrdma_debugfs_open,
+ .open = simple_open,
.read = ocrdma_dbgfs_ops_read,
};
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 5b7aeb224a30..8d3c78ddc906 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1272,7 +1272,7 @@ static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
* Do all the generic driver unit- and chip-independent memory
* allocation and initialization.
*/
-static int __init qlogic_ib_init(void)
+static int __init qib_ib_init(void)
{
int ret;
@@ -1316,12 +1316,12 @@ bail:
return ret;
}
-module_init(qlogic_ib_init);
+module_init(qib_ib_init);
/*
* Do the non-unit driver cleanup, memory free, etc. at unload.
*/
-static void __exit qlogic_ib_cleanup(void)
+static void __exit qib_ib_cleanup(void)
{
int ret;
@@ -1346,7 +1346,7 @@ static void __exit qlogic_ib_cleanup(void)
qib_dev_cleanup();
}
-module_exit(qlogic_ib_cleanup);
+module_exit(qib_ib_cleanup);
/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index edad991d60ed..22c720e5740d 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
event.event = IB_EVENT_PKEY_CHANGE;
event.device = &dd->verbs_dev.ibdev;
- event.element.port_num = 1;
+ event.element.port_num = port;
ib_dispatch_event(&event);
}
return 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 0cad0c40d742..7fcc150d603c 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -985,7 +985,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
struct ib_qp *ret;
if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
- init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
+ init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
+ init_attr->create_flags) {
ret = ERR_PTR(-EINVAL);
goto bail;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index d48d2c0a2e3c..53bd6a2d9cdb 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -466,6 +466,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
ucontext = to_uucontext(pd->uobject->context);
us_ibdev = to_usdev(pd->device);
+ if (init_attr->create_flags)
+ return ERR_PTR(-EINVAL);
+
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
index d135ad90d914..3a4288e0fbac 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
@@ -1,3 +1,21 @@
+/*
+ * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
new file mode 100644
index 000000000000..f3c7dcf03098
--- /dev/null
+++ b/drivers/infiniband/ulp/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/
+obj-$(CONFIG_INFINIBAND_SRP) += srp/
+obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
+obj-$(CONFIG_INFINIBAND_ISER) += iser/
+obj-$(CONFIG_INFINIBAND_ISERT) += isert/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1377f85911c2..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1030,10 +1030,20 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
.cap.max_send_sge = 1,
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
- .qp_context = tx
+ .qp_context = tx,
+ .create_flags = IB_QP_CREATE_USE_GFP_NOIO
};
- return ib_create_qp(priv->pd, &attr);
+ struct ib_qp *tx_qp;
+
+ tx_qp = ib_create_qp(priv->pd, &attr);
+ if (PTR_ERR(tx_qp) == -EINVAL) {
+ ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
+ priv->ca->name);
+ attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
+ tx_qp = ib_create_qp(priv->pd, &attr);
+ }
+ return tx_qp;
}
static int ipoib_cm_send_req(struct net_device *dev,
@@ -1104,12 +1114,14 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
int ret;
- p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+ p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
+ GFP_NOIO, PAGE_KERNEL);
if (!p->tx_ring) {
ipoib_warn(priv, "failed to allocate tx ring\n");
ret = -ENOMEM;
goto err_tx;
}
+ memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
if (IS_ERR(p->qp)) {
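
ipoib_cm_create_tx_qp() now requests IB_QP_CREATE_USE_GFP_NOIO and retries without it when the provider returns -EINVAL, matching the qib/usnic hunks above that reject unknown create_flags. The retry-without-optional-flag idiom in isolation (the provider hook and flag value are stand-ins, not the verbs API):

#include <errno.h>

struct qp_attr { unsigned int create_flags; };
#define QP_CREATE_USE_GFP_NOIO 0x1          /* illustrative flag value */

/* hypothetical provider hook: returns 0 or a negative errno */
static int provider_create_qp(struct qp_attr *attr)
{
        return (attr->create_flags & QP_CREATE_USE_GFP_NOIO) ? -EINVAL : 0;
}

static int create_tx_qp(void)
{
        struct qp_attr attr = { .create_flags = QP_CREATE_USE_GFP_NOIO };
        int ret = provider_create_qp(&attr);

        /* a provider that predates the flag rejects it with -EINVAL;
         * retry without the optional behaviour instead of failing */
        if (ret == -EINVAL) {
                attr.create_flags &= ~QP_CREATE_USE_GFP_NOIO;
                ret = provider_create_qp(&attr);
        }
        return ret;
}
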
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index c4b3940845e6..078cadd6c797 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -105,5 +105,5 @@ static const struct ethtool_ops ipoib_ethtool_ops = {
void ipoib_set_ethtool_ops(struct net_device *dev)
{
- SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
+ dev->ethtool_ops = &ipoib_ethtool_ops;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 25f195ef44b0..eb7973957a6e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
module_param_named(pi_guard, iser_pi_guard, int, 0644);
MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)");
+static struct workqueue_struct *release_wq;
struct iser_global ig;
void
@@ -337,24 +338,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
return cls_conn;
}
-static void
-iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iser_conn *ib_conn = conn->dd_data;
-
- iscsi_conn_teardown(cls_conn);
- /*
- * Userspace will normally call the stop callback and
- * already have freed the ib_conn, but if it goofed up then
- * we free it here.
- */
- if (ib_conn) {
- ib_conn->iscsi_conn = NULL;
- iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
- }
-}
-
static int
iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -392,29 +375,39 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
conn->dd_data = ib_conn;
ib_conn->iscsi_conn = conn;
- iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
return 0;
}
+static int
+iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *iscsi_conn;
+ struct iser_conn *ib_conn;
+
+ iscsi_conn = cls_conn->dd_data;
+ ib_conn = iscsi_conn->dd_data;
+ reinit_completion(&ib_conn->stop_completion);
+
+ return iscsi_conn_start(cls_conn);
+}
+
static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *ib_conn = conn->dd_data;
+ iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn);
+ iscsi_conn_stop(cls_conn, flag);
+
/*
* Userspace may have goofed up and not bound the connection or
* might have only partially set up the connection.
*/
if (ib_conn) {
- iscsi_conn_stop(cls_conn, flag);
- /*
- * There is no unbind event so the stop callback
- * must release the ref from the bind.
- */
- iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
+ conn->dd_data = NULL;
+ complete(&ib_conn->stop_completion);
}
- conn->dd_data = NULL;
}
static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
@@ -515,28 +508,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_HDRDGST_EN:
sscanf(buf, "%d", &value);
if (value) {
- iser_err("DataDigest wasn't negotiated to None");
+ iser_err("DataDigest wasn't negotiated to None\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_DATADGST_EN:
sscanf(buf, "%d", &value);
if (value) {
- iser_err("DataDigest wasn't negotiated to None");
+ iser_err("DataDigest wasn't negotiated to None\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_IFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
- iser_err("IFMarker wasn't negotiated to No");
+ iser_err("IFMarker wasn't negotiated to No\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_OFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
- iser_err("OFMarker wasn't negotiated to No");
+ iser_err("OFMarker wasn't negotiated to No\n");
return -EPROTO;
}
break;
@@ -652,19 +645,20 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
struct iser_conn *ib_conn;
ib_conn = ep->dd_data;
- if (ib_conn->iscsi_conn)
- /*
- * Must suspend xmit path if the ep is bound to the
- * iscsi_conn, so we know we are not accessing the ib_conn
- * when we free it.
- *
- * This may not be bound if the ep poll failed.
- */
- iscsi_suspend_tx(ib_conn->iscsi_conn);
-
-
- iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
+ iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
iser_conn_terminate(ib_conn);
+
+ /*
+ * if iser_conn and iscsi_conn are bound, we must wait for the
+ * iscsi_conn_stop call and for the ISER_CONN_DOWN state before freeing
+ * the iser resources. Otherwise we are safe to free them immediately.
+ */
+ if (ib_conn->iscsi_conn) {
+ INIT_WORK(&ib_conn->release_work, iser_release_work);
+ queue_work(release_wq, &ib_conn->release_work);
+ } else {
+ iser_conn_release(ib_conn);
+ }
}
static umode_t iser_attr_is_visible(int param_type, int param)
@@ -748,13 +742,13 @@ static struct iscsi_transport iscsi_iser_transport = {
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
- .destroy_conn = iscsi_iser_conn_destroy,
+ .destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
- .start_conn = iscsi_conn_start,
+ .start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
/* iscsi host params */
.get_host_param = iscsi_host_get_param,
@@ -801,6 +795,12 @@ static int __init iser_init(void)
mutex_init(&ig.connlist_mutex);
INIT_LIST_HEAD(&ig.connlist);
+ release_wq = alloc_workqueue("release workqueue", 0, 0);
+ if (!release_wq) {
+ iser_err("failed to allocate release workqueue\n");
+ return -ENOMEM;
+ }
+
iscsi_iser_scsi_transport = iscsi_register_transport(
&iscsi_iser_transport);
if (!iscsi_iser_scsi_transport) {
@@ -819,7 +819,24 @@ register_transport_failure:
static void __exit iser_exit(void)
{
+ struct iser_conn *ib_conn, *n;
+ int connlist_empty;
+
iser_dbg("Removing iSER datamover...\n");
+ destroy_workqueue(release_wq);
+
+ mutex_lock(&ig.connlist_mutex);
+ connlist_empty = list_empty(&ig.connlist);
+ mutex_unlock(&ig.connlist_mutex);
+
+ if (!connlist_empty) {
+ iser_err("Error cleanup stage completed but we still have iser "
+ "connections, destroying them anyway.\n");
+ list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) {
+ iser_conn_release(ib_conn);
+ }
+ }
+
iscsi_unregister_transport(&iscsi_iser_transport);
kmem_cache_destroy(ig.desc_cache);
}
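
The teardown rework above replaces the iser_conn_get()/iser_conn_put() refcounting with an explicit handshake: ep_disconnect queues release work, and the worker blocks on stop_completion until the .stop_conn callback fires. A reduced userspace model of that handshake, using pthreads in place of the kernel's completion and workqueue:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        pthread_mutex_t lock;
        pthread_cond_t stop_done;   /* stands in for stop_completion */
        int stopped;
};

static void *release_work(void *arg)   /* like iser_release_work() */
{
        struct conn *c = arg;

        pthread_mutex_lock(&c->lock);
        while (!c->stopped)                /* wait_for_completion() */
                pthread_cond_wait(&c->stop_done, &c->lock);
        pthread_mutex_unlock(&c->lock);

        printf("releasing conn resources\n");  /* iser_conn_release() */
        free(c);
        return NULL;
}

static void conn_stop(struct conn *c)  /* like iscsi_iser_conn_stop() */
{
        pthread_mutex_lock(&c->lock);
        c->stopped = 1;
        pthread_cond_signal(&c->stop_done);    /* complete() */
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct conn *c = malloc(sizeof(*c));
        pthread_t worker;

        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->stop_done, NULL);
        c->stopped = 0;

        pthread_create(&worker, NULL, release_work, c);  /* queue_work() */
        conn_stop(c);
        pthread_join(worker, NULL);
        return 0;
}
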
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 324129f80d40..97cd385bf7f7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,7 +69,7 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.3"
+#define DRV_VER "1.4"
#define iser_dbg(fmt, arg...) \
do { \
@@ -333,6 +333,8 @@ struct iser_conn {
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
char name[ISER_OBJECT_NAME_SIZE];
+ struct work_struct release_work;
+ struct completion stop_completion;
struct list_head conn_list; /* entry in ig conn list */
char *login_buf;
@@ -417,12 +419,12 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
void iser_conn_init(struct iser_conn *ib_conn);
-void iser_conn_get(struct iser_conn *ib_conn);
-
-int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
+void iser_conn_release(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
+void iser_release_work(struct work_struct *work);
+
void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len,
struct iser_conn *ib_conn);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2e2d903db838..8d44a4060634 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -41,11 +41,11 @@
#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * iser_task->data[ISER_DIR_IN].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_IN].data_len, Protection size
+ * os stored in task->prot[ISER_DIR_IN].data_len
*/
-static int iser_prepare_read_cmd(struct iscsi_task *task,
- unsigned int edtl)
+static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -73,14 +73,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
return err;
}
- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
- iser_err("Total data length: %ld, less than EDTL: "
- "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
- iser_task->data[ISER_DIR_IN].data_len, edtl,
- task->itt, iser_task->ib_conn);
- return -EINVAL;
- }
-
err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
@@ -100,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
}
/* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * task->data[ISER_DIR_OUT].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_OUT].data_len; the protection size
+ * is stored in task->prot[ISER_DIR_OUT].data_len
*/
static int
iser_prepare_write_cmd(struct iscsi_task *task,
@@ -135,14 +128,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Total data length: %ld, less than EDTL: %d, "
- "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
- iser_task->data[ISER_DIR_OUT].data_len,
- edtl, task->itt, task->conn);
- return -EINVAL;
- }
-
err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
@@ -417,11 +402,12 @@ int iser_send_command(struct iscsi_conn *conn,
if (scsi_prot_sg_count(sc)) {
prot_buf->buf = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
- prot_buf->data_len = sc->prot_sdb->length;
+ prot_buf->data_len = (data_buf->data_len >>
+ ilog2(sc->device->sector_size)) * 8;
}
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
- err = iser_prepare_read_cmd(task, edtl);
+ err = iser_prepare_read_cmd(task);
if (err)
goto send_command_error;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 32849f2becde..ea01075f9f9b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -581,14 +581,30 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
return ret;
}
+void iser_release_work(struct work_struct *work)
+{
+ struct iser_conn *ib_conn;
+
+ ib_conn = container_of(work, struct iser_conn, release_work);
+
+ /* wait for .conn_stop callback */
+ wait_for_completion(&ib_conn->stop_completion);
+
+ /* wait for the qp's post send and post receive buffers to empty */
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+ iser_conn_release(ib_conn);
+}
+
/**
* Frees all conn objects and deallocs conn descriptor
*/
-static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
+void iser_conn_release(struct iser_conn *ib_conn)
{
struct iser_device *device = ib_conn->device;
- BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+ BUG_ON(ib_conn->state == ISER_CONN_UP);
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
@@ -600,27 +616,13 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
if (device != NULL)
iser_device_try_release(device);
/* in the cma handler context, the caller actually destroys the id */
- if (ib_conn->cma_id != NULL && can_destroy_id) {
+ if (ib_conn->cma_id != NULL) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
iscsi_destroy_endpoint(ib_conn->ep);
}
-void iser_conn_get(struct iser_conn *ib_conn)
-{
- atomic_inc(&ib_conn->refcount);
-}
-
-int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
-{
- if (atomic_dec_and_test(&ib_conn->refcount)) {
- iser_conn_release(ib_conn, can_destroy_id);
- return 1;
- }
- return 0;
-}
-
/**
* triggers the start of the disconnect procedures and waits for them to be done
*/
@@ -638,24 +640,19 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
if (err)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
ib_conn,err);
-
- wait_event_interruptible(ib_conn->wait,
- ib_conn->state == ISER_CONN_DOWN);
-
- iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}
-static int iser_connect_error(struct rdma_cm_id *cma_id)
+static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
+
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
- return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}
-static int iser_addr_handler(struct rdma_cm_id *cma_id)
+static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
struct iser_conn *ib_conn;
@@ -664,7 +661,8 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id)
device = iser_device_find_by_ib_device(cma_id);
if (!device) {
iser_err("device lookup/creation failed\n");
- return iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
+ return;
}
ib_conn = (struct iser_conn *)cma_id->context;
@@ -686,13 +684,12 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id)
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
- return iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
+ return;
}
-
- return 0;
}
-static int iser_route_handler(struct rdma_cm_id *cma_id)
+static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
@@ -720,9 +717,9 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
}
- return 0;
+ return;
failure:
- return iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -735,14 +732,13 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
ib_conn = (struct iser_conn *)cma_id->context;
- ib_conn->state = ISER_CONN_UP;
- wake_up_interruptible(&ib_conn->wait);
+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP))
+ wake_up_interruptible(&ib_conn->wait);
}
-static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
+static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
- int ret;
ib_conn = (struct iser_conn *)cma_id->context;
@@ -762,24 +758,19 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
}
-
- ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
- return ret;
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
- int ret = 0;
-
iser_info("event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
- ret = iser_addr_handler(cma_id);
+ iser_addr_handler(cma_id);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- ret = iser_route_handler(cma_id);
+ iser_route_handler(cma_id);
break;
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id);
@@ -789,18 +780,18 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
- ret = iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
- ret = iser_disconnected_handler(cma_id);
+ iser_disconnected_handler(cma_id);
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
break;
}
- return ret;
+ return 0;
}
void iser_conn_init(struct iser_conn *ib_conn)
@@ -809,7 +800,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
init_waitqueue_head(&ib_conn->wait);
ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
+ init_completion(&ib_conn->stop_completion);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
}
@@ -837,7 +828,6 @@ int iser_connect(struct iser_conn *ib_conn,
ib_conn->state = ISER_CONN_PENDING;
- iser_conn_get(ib_conn); /* ref ib conn's cma id */
ib_conn->cma_id = rdma_create_id(iser_cma_handler,
(void *)ib_conn,
RDMA_PS_TCP, IB_QPT_RC);
@@ -874,9 +864,8 @@ id_failure:
ib_conn->cma_id = NULL;
addr_failure:
ib_conn->state = ISER_CONN_DOWN;
- iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */
connect_failure:
- iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
+ iser_conn_release(ib_conn);
return err;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b9d647468b99..d4c7928a0f36 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
if (pi_support && !device->pi_capable) {
- pr_err("Protection information requested but not supported\n");
- ret = -EINVAL;
+ pr_err("Protection information requested but not supported, "
+ "rejecting connect request\n");
+ ret = rdma_reject(cma_id, NULL, 0);
goto out_mr;
}
@@ -787,14 +788,12 @@ isert_disconnect_work(struct work_struct *work)
isert_put_conn(isert_conn);
return;
}
- if (!isert_conn->logout_posted) {
- pr_debug("Calling rdma_disconnect for !logout_posted from"
- " isert_disconnect_work\n");
+
+ if (isert_conn->disconnect) {
+ /* Send DREQ/DREP towards our initiator */
rdma_disconnect(isert_conn->conn_cm_id);
- mutex_unlock(&isert_conn->conn_mutex);
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- goto wake_up;
}
+
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
@@ -803,10 +802,11 @@ wake_up:
}
static void
-isert_disconnected_handler(struct rdma_cm_id *cma_id)
+isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{
struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+ isert_conn->disconnect = disconnect;
INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
schedule_work(&isert_conn->conn_logout_work);
}
@@ -815,29 +815,28 @@ static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
+ bool disconnect = false;
pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
- pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
ret = isert_connect_request(cma_id, event);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
isert_connected_handler(cma_id);
break;
- case RDMA_CM_EVENT_DISCONNECTED:
- pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
- isert_disconnected_handler(cma_id);
- break;
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
+ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+ disconnect = true;
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+ isert_disconnected_handler(cma_id, disconnect);
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
default:
- pr_err("Unknown RDMA CMA event: %d\n", event->event);
+ pr_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}
@@ -1054,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
if (!login->login_failed) {
if (login->login_complete) {
- if (isert_conn->conn_device->use_fastreg) {
+ if (!conn->sess->sess_ops->SessionType &&
+ isert_conn->conn_device->use_fastreg) {
+ /* Normal Session and fastreg is used */
u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
ret = isert_conn_create_fastreg_pool(isert_conn,
@@ -1824,11 +1825,8 @@ isert_do_control_comp(struct work_struct *work)
break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
- /*
- * Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_wait_conn()
- */
- isert_conn->logout_posted = true;
+
+ atomic_dec(&isert_conn->post_send_buf_count);
iscsit_logout_post_handler(cmd, cmd->conn);
break;
case ISTATE_SEND_TEXTRSP:
@@ -2034,6 +2032,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->conn_mutex);
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+
complete(&isert_conn->conn_wait_comp_err);
}
@@ -2320,7 +2320,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
int rc;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- rc = iscsit_build_text_rsp(cmd, conn, hdr);
+ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
if (rc < 0)
return rc;
@@ -3156,9 +3156,14 @@ accept_wait:
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+ pr_debug("np_thread_state %d for isert_accept_np\n",
+ np->np_thread_state);
+ /*
+ * No point in stalling here when np_thread
+ * is in state RESET/SHUTDOWN/EXIT - bail
+ */
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
@@ -3208,15 +3213,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
struct isert_conn *isert_conn = conn->context;
pr_debug("isert_wait_conn: Starting \n");
- /*
- * Decrement post_send_buf_count for special case when called
- * from isert_do_control_comp() -> iscsit_logout_post_handler()
- */
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->logout_posted)
- atomic_dec(&isert_conn->post_send_buf_count);
- if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->conn_cm_id) {
pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
@@ -3293,6 +3292,7 @@ destroy_rx_wq:
static void __exit isert_exit(void)
{
+ flush_scheduled_work();
destroy_workqueue(isert_comp_wq);
destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);
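
The reworked isert_cma_handler() folds four CM events into one teardown path; the deliberate fallthrough means RDMA_CM_EVENT_TIMEWAIT_EXIT reaches the handler with disconnect still false, so no new DREQ is sent for a connection already past DISCONNECTED. The control flow in isolation (event names abbreviated, handler reduced to a print):

#include <stdbool.h>
#include <stdio.h>

enum cm_event { ADDR_CHANGE, DISCONNECTED, DEVICE_REMOVAL, TIMEWAIT_EXIT };

/* stands in for isert_disconnected_handler(cma_id, disconnect) */
static void disconnected_handler(bool disconnect)
{
        printf("teardown, send DREQ: %s\n", disconnect ? "yes" : "no");
}

static void cma_handler(enum cm_event event)
{
        bool disconnect = false;

        switch (event) {
        case ADDR_CHANGE:     /* FALLTHRU */
        case DISCONNECTED:    /* FALLTHRU */
        case DEVICE_REMOVAL:
                disconnect = true;
                /* FALLTHRU: TIMEWAIT_EXIT tears down without a new DREQ */
        case TIMEWAIT_EXIT:
                disconnected_handler(disconnect);
                break;
        }
}
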
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index da6612e68000..04f51f7bf614 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -116,7 +116,6 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- bool logout_posted;
int post_recv_buf_count;
atomic_t post_send_buf_count;
u32 responder_resources;
@@ -151,6 +150,7 @@ struct isert_conn {
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
+ bool disconnect;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 66a908bf3fb9..e3c2c5b4297f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
-#define pr_fmt(fmt) PFX fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
@@ -66,6 +66,8 @@ static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
+static bool prefer_fr;
+static bool register_always;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
@@ -87,6 +89,14 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
+module_param(prefer_fr, bool, 0444);
+MODULE_PARM_DESC(prefer_fr,
+"Whether to use fast registration if both FMR and fast registration are supported");
+
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+ "Use memory registration even for contiguous memory regions");
+
static struct kernel_param_ops srp_tmo_ops;
static int srp_reconnect_delay = 10;
@@ -288,28 +298,174 @@ static int srp_new_cm_id(struct srp_target_port *target)
return 0;
}
+static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
+{
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_fmr_pool_param fmr_param;
+
+ memset(&fmr_param, 0, sizeof(fmr_param));
+ fmr_param.pool_size = target->scsi_host->can_queue;
+ fmr_param.dirty_watermark = fmr_param.pool_size / 4;
+ fmr_param.cache = 1;
+ fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
+ fmr_param.page_shift = ilog2(dev->mr_page_size);
+ fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
+
+ return ib_create_fmr_pool(dev->pd, &fmr_param);
+}
+
+/**
+ * srp_destroy_fr_pool() - free the resources owned by a pool
+ * @pool: Fast registration pool to be destroyed.
+ */
+static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
+{
+ int i;
+ struct srp_fr_desc *d;
+
+ if (!pool)
+ return;
+
+ for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
+ if (d->frpl)
+ ib_free_fast_reg_page_list(d->frpl);
+ if (d->mr)
+ ib_dereg_mr(d->mr);
+ }
+ kfree(pool);
+}
+
+/**
+ * srp_create_fr_pool() - allocate and initialize a pool for fast registration
+ * @device: IB device to allocate fast registration descriptors for.
+ * @pd: Protection domain associated with the FR descriptors.
+ * @pool_size: Number of descriptors to allocate.
+ * @max_page_list_len: Maximum fast registration work request page list length.
+ */
+static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
+ struct ib_pd *pd, int pool_size,
+ int max_page_list_len)
+{
+ struct srp_fr_pool *pool;
+ struct srp_fr_desc *d;
+ struct ib_mr *mr;
+ struct ib_fast_reg_page_list *frpl;
+ int i, ret = -EINVAL;
+
+ if (pool_size <= 0)
+ goto err;
+ ret = -ENOMEM;
+ pool = kzalloc(sizeof(struct srp_fr_pool) +
+ pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->size = pool_size;
+ pool->max_page_list_len = max_page_list_len;
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free_list);
+
+ for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
+ mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
+ if (IS_ERR(mr)) {
+ ret = PTR_ERR(mr);
+ goto destroy_pool;
+ }
+ d->mr = mr;
+ frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
+ if (IS_ERR(frpl)) {
+ ret = PTR_ERR(frpl);
+ goto destroy_pool;
+ }
+ d->frpl = frpl;
+ list_add_tail(&d->entry, &pool->free_list);
+ }
+
+out:
+ return pool;
+
+destroy_pool:
+ srp_destroy_fr_pool(pool);
+
+err:
+ pool = ERR_PTR(ret);
+ goto out;
+}
+
+/**
+ * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
+ * @pool: Pool to obtain descriptor from.
+ */
+static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
+{
+ struct srp_fr_desc *d = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ if (!list_empty(&pool->free_list)) {
+ d = list_first_entry(&pool->free_list, typeof(*d), entry);
+ list_del(&d->entry);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return d;
+}
+
+/**
+ * srp_fr_pool_put() - put an FR descriptor back in the free list
+ * @pool: Pool the descriptor was allocated from.
+ * @desc: Pointer to an array of fast registration descriptor pointers.
+ * @n: Number of descriptors to put back.
+ *
+ * Note: The caller must already have queued an invalidation request for
+ * desc->mr->rkey before calling this function.
+ */
+static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
+ int n)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ for (i = 0; i < n; i++)
+ list_add(&desc[i]->entry, &pool->free_list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
+{
+ struct srp_device *dev = target->srp_host->srp_dev;
+
+ return srp_create_fr_pool(dev->dev, dev->pd,
+ target->scsi_host->can_queue,
+ dev->max_pages_per_mr);
+}
+
static int srp_create_target_ib(struct srp_target_port *target)
{
+ struct srp_device *dev = target->srp_host->srp_dev;
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
+ struct ib_fmr_pool *fmr_pool = NULL;
+ struct srp_fr_pool *fr_pool = NULL;
+ const int m = 1 + dev->use_fast_reg;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
if (!init_attr)
return -ENOMEM;
- recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target,
+ recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target,
target->queue_size, target->comp_vector);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}
- send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target,
- target->queue_size, target->comp_vector);
+ send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target,
+ m * target->queue_size, target->comp_vector);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
@@ -318,16 +474,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
- init_attr->cap.max_send_wr = target->queue_size;
+ init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size;
init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = 1;
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr->qp_type = IB_QPT_RC;
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
- qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
+ qp = ib_create_qp(dev->pd, init_attr);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto err_send_cq;
@@ -337,6 +493,30 @@ static int srp_create_target_ib(struct srp_target_port *target)
if (ret)
goto err_qp;
+ if (dev->use_fast_reg && dev->has_fr) {
+ fr_pool = srp_alloc_fr_pool(target);
+ if (IS_ERR(fr_pool)) {
+ ret = PTR_ERR(fr_pool);
+ shost_printk(KERN_WARNING, target->scsi_host, PFX
+ "FR pool allocation failed (%d)\n", ret);
+ goto err_qp;
+ }
+ if (target->fr_pool)
+ srp_destroy_fr_pool(target->fr_pool);
+ target->fr_pool = fr_pool;
+ } else if (!dev->use_fast_reg && dev->has_fmr) {
+ fmr_pool = srp_alloc_fmr_pool(target);
+ if (IS_ERR(fmr_pool)) {
+ ret = PTR_ERR(fmr_pool);
+ shost_printk(KERN_WARNING, target->scsi_host, PFX
+ "FMR pool allocation failed (%d)\n", ret);
+ goto err_qp;
+ }
+ if (target->fmr_pool)
+ ib_destroy_fmr_pool(target->fmr_pool);
+ target->fmr_pool = fmr_pool;
+ }
+
if (target->qp)
ib_destroy_qp(target->qp);
if (target->recv_cq)
@@ -371,8 +551,16 @@ err:
*/
static void srp_free_target_ib(struct srp_target_port *target)
{
+ struct srp_device *dev = target->srp_host->srp_dev;
int i;
+ if (dev->use_fast_reg) {
+ if (target->fr_pool)
+ srp_destroy_fr_pool(target->fr_pool);
+ } else {
+ if (target->fmr_pool)
+ ib_destroy_fmr_pool(target->fmr_pool);
+ }
ib_destroy_qp(target->qp);
ib_destroy_cq(target->send_cq);
ib_destroy_cq(target->recv_cq);
@@ -577,7 +765,8 @@ static void srp_disconnect_target(struct srp_target_port *target)
static void srp_free_req_data(struct srp_target_port *target)
{
- struct ib_device *ibdev = target->srp_host->srp_dev->dev;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = dev->dev;
struct srp_request *req;
int i;
@@ -586,7 +775,10 @@ static void srp_free_req_data(struct srp_target_port *target)
for (i = 0; i < target->req_ring_size; ++i) {
req = &target->req_ring[i];
- kfree(req->fmr_list);
+ if (dev->use_fast_reg)
+ kfree(req->fr_list);
+ else
+ kfree(req->fmr_list);
kfree(req->map_page);
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
@@ -605,6 +797,7 @@ static int srp_alloc_req_data(struct srp_target_port *target)
struct srp_device *srp_dev = target->srp_host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
struct srp_request *req;
+ void *mr_list;
dma_addr_t dma_addr;
int i, ret = -ENOMEM;
@@ -617,12 +810,20 @@ static int srp_alloc_req_data(struct srp_target_port *target)
for (i = 0; i < target->req_ring_size; ++i) {
req = &target->req_ring[i];
- req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
- GFP_KERNEL);
- req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
- GFP_KERNEL);
+ mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
+ GFP_KERNEL);
+ if (!mr_list)
+ goto out;
+ if (srp_dev->use_fast_reg)
+ req->fr_list = mr_list;
+ else
+ req->fmr_list = mr_list;
+ req->map_page = kmalloc(srp_dev->max_pages_per_mr *
+ sizeof(void *), GFP_KERNEL);
+ if (!req->map_page)
+ goto out;
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
- if (!req->fmr_list || !req->map_page || !req->indirect_desc)
+ if (!req->indirect_desc)
goto out;
dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
@@ -759,21 +960,56 @@ static int srp_connect_target(struct srp_target_port *target)
}
}
+static int srp_inv_rkey(struct srp_target_port *target, u32 rkey)
+{
+ struct ib_send_wr *bad_wr;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_id = LOCAL_INV_WR_ID_MASK,
+ .next = NULL,
+ .num_sge = 0,
+ .send_flags = 0,
+ .ex.invalidate_rkey = rkey,
+ };
+
+ return ib_post_send(target->qp, &wr, &bad_wr);
+}
+
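A hedged note on the work request above: with the QP created with
sq_sig_type == IB_SIGNAL_REQ_WR and send_flags left at zero, these
invalidate WRs complete silently on success; only error (or flush)
completions reach srp_send_completion(). A hypothetical helper makes the
wr_id convention explicit (the mask values are defined in ib_srp.h
below):

	static bool srp_wr_id_is_mr_op(u64 wr_id)
	{
		/* Set for LOCAL_INV / FAST_REG_MR WRs; clear when wr_id
		 * is really a struct srp_iu pointer. */
		return wr_id & (LOCAL_INV_WR_ID_MASK | FAST_REG_WR_ID_MASK);
	}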
static void srp_unmap_data(struct scsi_cmnd *scmnd,
struct srp_target_port *target,
struct srp_request *req)
{
- struct ib_device *ibdev = target->srp_host->srp_dev->dev;
- struct ib_pool_fmr **pfmr;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = dev->dev;
+ int i, res;
if (!scsi_sglist(scmnd) ||
(scmnd->sc_data_direction != DMA_TO_DEVICE &&
scmnd->sc_data_direction != DMA_FROM_DEVICE))
return;
- pfmr = req->fmr_list;
- while (req->nfmr--)
- ib_fmr_pool_unmap(*pfmr++);
+ if (dev->use_fast_reg) {
+ struct srp_fr_desc **pfr;
+
+ for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
+ res = srp_inv_rkey(target, (*pfr)->mr->rkey);
+ if (res < 0) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ (*pfr)->mr->rkey, res);
+ queue_work(system_long_wq,
+ &target->tl_err_work);
+ }
+ }
+ if (req->nmdesc)
+ srp_fr_pool_put(target->fr_pool, req->fr_list,
+ req->nmdesc);
+ } else {
+ struct ib_pool_fmr **pfmr;
+
+ for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
+ ib_fmr_pool_unmap(*pfmr);
+ }
ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
scmnd->sc_data_direction);
@@ -813,6 +1049,10 @@ static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
/**
* srp_free_req() - Unmap data and add request to the free request list.
+ * @target: SRP target port.
+ * @req: Request to be freed.
+ * @scmnd: SCSI command associated with @req.
+ * @req_lim_delta: Amount to be added to @target->req_lim.
*/
static void srp_free_req(struct srp_target_port *target,
struct srp_request *req, struct scsi_cmnd *scmnd,
@@ -882,21 +1122,19 @@ static int srp_rport_reconnect(struct srp_rport *rport)
* callbacks will have finished before a new QP is allocated.
*/
ret = srp_new_cm_id(target);
- /*
- * Whether or not creating a new CM ID succeeded, create a new
- * QP. This guarantees that all completion callback function
- * invocations have finished before request resetting starts.
- */
- if (ret == 0)
- ret = srp_create_target_ib(target);
- else
- srp_create_target_ib(target);
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
srp_finish_req(target, req, NULL, DID_RESET << 16);
}
+ /*
+ * Whether or not creating a new CM ID succeeded, create a new
+ * QP. This guarantees that all callback functions for the old QP have
+ * finished before any send requests are posted on the new QP.
+ */
+ ret += srp_create_target_ib(target);
+
INIT_LIST_HEAD(&target->free_tx);
for (i = 0; i < target->queue_size; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
@@ -928,33 +1166,87 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
static int srp_map_finish_fmr(struct srp_map_state *state,
struct srp_target_port *target)
{
- struct srp_device *dev = target->srp_host->srp_dev;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
- if (!state->npages)
- return 0;
-
- if (state->npages == 1) {
- srp_map_desc(state, state->base_dma_addr, state->fmr_len,
- target->rkey);
- state->npages = state->fmr_len = 0;
- return 0;
- }
-
- fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
+ fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages,
state->npages, io_addr);
if (IS_ERR(fmr))
return PTR_ERR(fmr);
*state->next_fmr++ = fmr;
- state->nfmr++;
+ state->nmdesc++;
+
+ srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
- srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
- state->npages = state->fmr_len = 0;
return 0;
}
+static int srp_map_finish_fr(struct srp_map_state *state,
+ struct srp_target_port *target)
+{
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_send_wr *bad_wr;
+ struct ib_send_wr wr;
+ struct srp_fr_desc *desc;
+ u32 rkey;
+
+ desc = srp_fr_pool_get(target->fr_pool);
+ if (!desc)
+ return -ENOMEM;
+
+ rkey = ib_inc_rkey(desc->mr->rkey);
+ ib_update_fast_reg_key(desc->mr, rkey);
+
+ memcpy(desc->frpl->page_list, state->pages,
+ sizeof(state->pages[0]) * state->npages);
+
+ memset(&wr, 0, sizeof(wr));
+ wr.opcode = IB_WR_FAST_REG_MR;
+ wr.wr_id = FAST_REG_WR_ID_MASK;
+ wr.wr.fast_reg.iova_start = state->base_dma_addr;
+ wr.wr.fast_reg.page_list = desc->frpl;
+ wr.wr.fast_reg.page_list_len = state->npages;
+ wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
+ wr.wr.fast_reg.length = state->dma_len;
+ wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ wr.wr.fast_reg.rkey = desc->mr->lkey;
+
+ *state->next_fr++ = desc;
+ state->nmdesc++;
+
+ srp_map_desc(state, state->base_dma_addr, state->dma_len,
+ desc->mr->rkey);
+
+ return ib_post_send(target->qp, &wr, &bad_wr);
+}
+
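The rkey bump above is what defends against stale remote access: each
re-registration of the same MR carries a new 8-bit consumer key, so a
peer still holding the previous rkey gets a protection error. A sketch
of ib_inc_rkey(), hedged on the <rdma/ib_verbs.h> of this era:

	static inline u32 ib_inc_rkey(u32 rkey)
	{
		const u32 mask = 0x000000ff;

		/* Increment only the low key byte; keep the MR index bits. */
		return ((rkey + 1) & mask) | (rkey & ~mask);
	}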
+static int srp_finish_mapping(struct srp_map_state *state,
+ struct srp_target_port *target)
+{
+ int ret = 0;
+
+ if (state->npages == 0)
+ return 0;
+
+ if (state->npages == 1 && !register_always)
+ srp_map_desc(state, state->base_dma_addr, state->dma_len,
+ target->rkey);
+ else
+ ret = target->srp_host->srp_dev->use_fast_reg ?
+ srp_map_finish_fr(state, target) :
+ srp_map_finish_fmr(state, target);
+
+ if (ret == 0) {
+ state->npages = 0;
+ state->dma_len = 0;
+ }
+
+ return ret;
+}
+
static void srp_map_update_start(struct srp_map_state *state,
struct scatterlist *sg, int sg_index,
dma_addr_t dma_addr)
@@ -967,7 +1259,7 @@ static void srp_map_update_start(struct srp_map_state *state,
static int srp_map_sg_entry(struct srp_map_state *state,
struct srp_target_port *target,
struct scatterlist *sg, int sg_index,
- int use_fmr)
+ bool use_mr)
{
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_device *ibdev = dev->dev;
@@ -979,23 +1271,25 @@ static int srp_map_sg_entry(struct srp_map_state *state,
if (!dma_len)
return 0;
- if (use_fmr == SRP_MAP_NO_FMR) {
- /* Once we're in direct map mode for a request, we don't
- * go back to FMR mode, so no need to update anything
+ if (!use_mr) {
+ /*
+ * Once we're in direct map mode for a request, we don't
+ * go back to FMR or FR mode, so no need to update anything
* other than the descriptor.
*/
srp_map_desc(state, dma_addr, dma_len, target->rkey);
return 0;
}
- /* If we start at an offset into the FMR page, don't merge into
- * the current FMR. Finish it out, and use the kernel's MR for this
- * sg entry. This is to avoid potential bugs on some SRP targets
- * that were never quite defined, but went away when the initiator
- * avoided using FMR on such page fragments.
+ /*
+ * Since not all RDMA HW drivers support non-zero page offsets for
+ * FMR, if we start at an offset into a page, don't merge into the
+ * current FMR mapping. Finish it out, and use the kernel's MR for
+ * this sg entry.
*/
- if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
- ret = srp_map_finish_fmr(state, target);
+ if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
+ dma_len > dev->mr_max_size) {
+ ret = srp_finish_mapping(state, target);
if (ret)
return ret;
@@ -1004,52 +1298,106 @@ static int srp_map_sg_entry(struct srp_map_state *state,
return 0;
}
- /* If this is the first sg to go into the FMR, save our position.
- * We need to know the first unmapped entry, its index, and the
- * first unmapped address within that entry to be able to restart
- * mapping after an error.
+ /*
+ * If this is the first sg that will be mapped via FMR or via FR, save
+ * our position. We need to know the first unmapped entry, its index,
+ * and the first unmapped address within that entry to be able to
+ * restart mapping after an error.
*/
if (!state->unmapped_sg)
srp_map_update_start(state, sg, sg_index, dma_addr);
while (dma_len) {
- if (state->npages == SRP_FMR_SIZE) {
- ret = srp_map_finish_fmr(state, target);
+ unsigned offset = dma_addr & ~dev->mr_page_mask;
+ if (state->npages == dev->max_pages_per_mr || offset != 0) {
+ ret = srp_finish_mapping(state, target);
if (ret)
return ret;
srp_map_update_start(state, sg, sg_index, dma_addr);
}
- len = min_t(unsigned int, dma_len, dev->fmr_page_size);
+ len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
if (!state->npages)
state->base_dma_addr = dma_addr;
- state->pages[state->npages++] = dma_addr;
- state->fmr_len += len;
+ state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
+ state->dma_len += len;
dma_addr += len;
dma_len -= len;
}
- /* If the last entry of the FMR wasn't a full page, then we need to
+ /*
+ * If the last entry of the MR wasn't a full page, then we need to
* close it out and start a new one -- we can only merge at page
* boundaries.
*/
ret = 0;
- if (len != dev->fmr_page_size) {
- ret = srp_map_finish_fmr(state, target);
+ if (len != dev->mr_page_size) {
+ ret = srp_finish_mapping(state, target);
if (!ret)
srp_map_update_start(state, NULL, 0, 0);
}
return ret;
}
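A worked example of the chunking loop above, with invented values and
mr_page_size = 4096: an sg entry at dma_addr = 0x10000800 with
dma_len = 0x1000 is consumed as

	/* iteration 1: offset = 0x800, so any pending mapping is
	 *              finished first (a no-op when npages == 0);
	 *              len = min(0x1000, 4096 - 0x800) = 0x800,
	 *              pages[npages++] = 0x10000000,
	 *              base_dma_addr   = 0x10000800
	 * iteration 2: offset = 0, len = 0x800,
	 *              pages[npages++] = 0x10001000
	 */

after which the mapping is closed out anyway, because the final chunk
(0x800) is not a full page.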
+static int srp_map_sg(struct srp_map_state *state,
+ struct srp_target_port *target, struct srp_request *req,
+ struct scatterlist *scat, int count)
+{
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = dev->dev;
+ struct scatterlist *sg;
+ int i;
+ bool use_mr;
+
+ state->desc = req->indirect_desc;
+ state->pages = req->map_page;
+ if (dev->use_fast_reg) {
+ state->next_fr = req->fr_list;
+ use_mr = !!target->fr_pool;
+ } else {
+ state->next_fmr = req->fmr_list;
+ use_mr = !!target->fmr_pool;
+ }
+
+ for_each_sg(scat, sg, count, i) {
+ if (srp_map_sg_entry(state, target, sg, i, use_mr)) {
+ /*
+ * Memory registration failed, so backtrack to the
+ * first unmapped entry and continue on without using
+ * memory registration.
+ */
+ dma_addr_t dma_addr;
+ unsigned int dma_len;
+
+backtrack:
+ sg = state->unmapped_sg;
+ i = state->unmapped_index;
+
+ dma_addr = ib_sg_dma_address(ibdev, sg);
+ dma_len = ib_sg_dma_len(ibdev, sg);
+ dma_len -= (state->unmapped_addr - dma_addr);
+ dma_addr = state->unmapped_addr;
+ use_mr = false;
+ srp_map_desc(state, dma_addr, dma_len, target->rkey);
+ }
+ }
+
+ if (use_mr && srp_finish_mapping(state, target))
+ goto backtrack;
+
+ req->nmdesc = state->nmdesc;
+
+ return 0;
+}
+
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_request *req)
{
- struct scatterlist *scat, *sg;
+ struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
- int i, len, nents, count, use_fmr;
+ int len, nents, count;
struct srp_device *dev;
struct ib_device *ibdev;
struct srp_map_state state;
@@ -1081,7 +1429,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1) {
+ if (count == 1 && !register_always) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1094,13 +1442,13 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->key = cpu_to_be32(target->rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
- req->nfmr = 0;
+ req->nmdesc = 0;
goto map_complete;
}
- /* We have more than one scatter/gather entry, so build our indirect
- * descriptor table, trying to merge as many entries with FMR as we
- * can.
+ /*
+ * We have more than one scatter/gather entry, so build our indirect
+ * descriptor table, trying to merge as many entries as we can.
*/
indirect_hdr = (void *) cmd->add_data;
@@ -1108,35 +1456,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
- state.desc = req->indirect_desc;
- state.pages = req->map_page;
- state.next_fmr = req->fmr_list;
-
- use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
-
- for_each_sg(scat, sg, count, i) {
- if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
- /* FMR mapping failed, so backtrack to the first
- * unmapped entry and continue on without using FMR.
- */
- dma_addr_t dma_addr;
- unsigned int dma_len;
-
-backtrack:
- sg = state.unmapped_sg;
- i = state.unmapped_index;
-
- dma_addr = ib_sg_dma_address(ibdev, sg);
- dma_len = ib_sg_dma_len(ibdev, sg);
- dma_len -= (state.unmapped_addr - dma_addr);
- dma_addr = state.unmapped_addr;
- use_fmr = SRP_MAP_NO_FMR;
- srp_map_desc(&state, dma_addr, dma_len, target->rkey);
- }
- }
-
- if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
- goto backtrack;
+ srp_map_sg(&state, target, req, scat, count);
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
@@ -1144,9 +1464,9 @@ backtrack:
* guaranteed to fit into the command, as the SCSI layer won't
* give us more S/G entries than we allow.
*/
- req->nfmr = state.nfmr;
if (state.ndesc == 1) {
- /* FMR mapping was able to collapse this to one entry,
+ /*
+ * Memory registration collapsed the sg-list into one entry,
* so use a direct descriptor.
*/
struct srp_direct_buf *buf = (void *) cmd->add_data;
@@ -1455,6 +1775,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
/**
* srp_tl_err_work() - handle a transport layer error
+ * @work: Work structure embedded in an SRP target port.
*
* Note: This function may get invoked before the rport has been created,
* hence the target->rport test.
@@ -1468,14 +1789,24 @@ static void srp_tl_err_work(struct work_struct *work)
srp_start_tl_fail_timers(target->rport);
}
-static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
- struct srp_target_port *target)
+static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
+ bool send_err, struct srp_target_port *target)
{
if (target->connected && !target->qp_in_error) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed %s status %d\n",
- send_err ? "send" : "receive",
- wc_status);
+ if (wr_id & LOCAL_INV_WR_ID_MASK) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "LOCAL_INV failed with status %d\n",
+ wc_status);
+ } else if (wr_id & FAST_REG_WR_ID_MASK) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "FAST_REG_MR failed status %d\n",
+ wc_status);
+ } else {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed %s status %d for iu %p\n",
+ send_err ? "send" : "receive",
+ wc_status, (void *)(uintptr_t)wr_id);
+ }
queue_work(system_long_wq, &target->tl_err_work);
}
target->qp_in_error = true;
@@ -1491,7 +1822,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
if (likely(wc.status == IB_WC_SUCCESS)) {
srp_handle_recv(target, &wc);
} else {
- srp_handle_qp_err(wc.status, false, target);
+ srp_handle_qp_err(wc.wr_id, wc.status, false, target);
}
}
}
@@ -1507,7 +1838,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
list_add(&iu->list, &target->free_tx);
} else {
- srp_handle_qp_err(wc.status, true, target);
+ srp_handle_qp_err(wc.wr_id, wc.status, true, target);
}
}
}
@@ -1521,7 +1852,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
- int len, result;
+ int len, ret;
const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
/*
@@ -1533,12 +1864,9 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
if (in_scsi_eh)
mutex_lock(&rport->mutex);
- result = srp_chkready(target->rport);
- if (unlikely(result)) {
- scmnd->result = result;
- scmnd->scsi_done(scmnd);
- goto unlock_rport;
- }
+ scmnd->result = srp_chkready(target->rport);
+ if (unlikely(scmnd->result))
+ goto err;
spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
@@ -1553,7 +1881,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
DMA_TO_DEVICE);
- scmnd->result = 0;
scmnd->host_scribble = (void *) req;
cmd = iu->buf;
@@ -1570,7 +1897,15 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
len = srp_map_data(scmnd, target, req);
if (len < 0) {
shost_printk(KERN_ERR, target->scsi_host,
- PFX "Failed to map data\n");
+ PFX "Failed to map data (%d)\n", len);
+ /*
+ * If we ran out of memory descriptors (-ENOMEM) because an
+ * application is queuing many requests with more than
+ * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
+ * to reduce queue depth temporarily.
+ */
+ scmnd->result = len == -ENOMEM ?
+ DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
goto err_iu;
}
@@ -1582,11 +1917,13 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
goto err_unmap;
}
+ ret = 0;
+
unlock_rport:
if (in_scsi_eh)
mutex_unlock(&rport->mutex);
- return 0;
+ return ret;
err_unmap:
srp_unmap_data(scmnd, target, req);
@@ -1594,16 +1931,27 @@ err_unmap:
err_iu:
srp_put_tx_iu(target, iu, SRP_IU_CMD);
+ /*
+ * Ensure that the loops that iterate over the request ring cannot
+ * encounter a dangling SCSI command pointer.
+ */
+ req->scmnd = NULL;
+
spin_lock_irqsave(&target->lock, flags);
list_add(&req->list, &target->free_reqs);
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
- if (in_scsi_eh)
- mutex_unlock(&rport->mutex);
+err:
+ if (scmnd->result) {
+ scmnd->scsi_done(scmnd);
+ ret = 0;
+ } else {
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ }
- return SCSI_MLQUEUE_HOST_BUSY;
+ goto unlock_rport;
}
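A hedged aside on the result encoding used above, per the
<scsi/scsi.h> byte layout of this era:

	/*
	 * result = driver_byte << 24 | host_byte << 16 |
	 *          msg_byte << 8 | status << 1
	 *
	 * The status constants are SAM values shifted right by one, so
	 * QUEUE_FULL (0x14) << 1 restores the wire value 0x28
	 * (TASK SET FULL).  DID_OK << 16 | QUEUE_FULL << 1 thus reports
	 * a successful transport with "task set full" status, which the
	 * SCSI mid-layer answers by requeueing the command and ramping
	 * the queue depth down.
	 */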
/*
@@ -2310,6 +2658,8 @@ static struct class srp_class = {
/**
* srp_conn_unique() - check whether the connection to a target is unique
+ * @host: SRP host.
+ * @target: SRP target port.
*/
static bool srp_conn_unique(struct srp_host *host,
struct srp_target_port *target)
@@ -2605,7 +2955,8 @@ static ssize_t srp_create_target(struct device *dev,
container_of(dev, struct srp_host, dev);
struct Scsi_Host *target_host;
struct srp_target_port *target;
- struct ib_device *ibdev = host->srp_dev->dev;
+ struct srp_device *srp_dev = host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
int ret;
target_host = scsi_host_alloc(&srp_template,
@@ -2650,9 +3001,9 @@ static ssize_t srp_create_target(struct device *dev,
goto err;
}
- if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
- target->cmd_sg_cnt < target->sg_tablesize) {
- pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
+ if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+ target->cmd_sg_cnt < target->sg_tablesize) {
+ pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
target->sg_tablesize = target->cmd_sg_cnt;
}
@@ -2790,9 +3141,9 @@ static void srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *dev_attr;
- struct ib_fmr_pool_param fmr_param;
struct srp_host *host;
- int max_pages_per_fmr, fmr_page_shift, s, e, p;
+ int mr_page_shift, s, e, p;
+ u64 max_pages_per_mr;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
if (!dev_attr)
@@ -2807,15 +3158,39 @@ static void srp_add_one(struct ib_device *device)
if (!srp_dev)
goto free_attr;
+ srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
+ device->map_phys_fmr && device->unmap_fmr);
+ srp_dev->has_fr = (dev_attr->device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ if (!srp_dev->has_fmr && !srp_dev->has_fr)
+ dev_warn(&device->dev, "neither FMR nor FR is supported\n");
+
+ srp_dev->use_fast_reg = (srp_dev->has_fr &&
+ (!srp_dev->has_fmr || prefer_fr));
+
/*
* Use the smallest page size supported by the HCA, down to a
* minimum of 4096 bytes. We're unlikely to build large sglists
* out of smaller entries.
*/
- fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
- srp_dev->fmr_page_size = 1 << fmr_page_shift;
- srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
- srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
+ mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
+ srp_dev->mr_page_size = 1 << mr_page_shift;
+ srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
+ max_pages_per_mr = dev_attr->max_mr_size;
+ do_div(max_pages_per_mr, srp_dev->mr_page_size);
+ srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
+ max_pages_per_mr);
+ if (srp_dev->use_fast_reg) {
+ srp_dev->max_pages_per_mr =
+ min_t(u32, srp_dev->max_pages_per_mr,
+ dev_attr->max_fast_reg_page_list_len);
+ }
+ srp_dev->mr_max_size = srp_dev->mr_page_size *
+ srp_dev->max_pages_per_mr;
+ pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
+ device->name, mr_page_shift, dev_attr->max_mr_size,
+ dev_attr->max_fast_reg_page_list_len,
+ srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
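A worked instance of the sizing above, with invented device limits:
mr_page_size = 4096 and dev_attr->max_mr_size = 0xffffffff give
max_pages_per_mr = min(512, 0xffffffff / 4096 = 1048575) = 512; if fast
registration is in use and max_fast_reg_page_list_len = 256, that drops
to 256, for mr_max_size = 4096 * 256 = 1 MiB per registration.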
INIT_LIST_HEAD(&srp_dev->dev_list);
@@ -2831,27 +3206,6 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->mr))
goto err_pd;
- for (max_pages_per_fmr = SRP_FMR_SIZE;
- max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
- max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
- memset(&fmr_param, 0, sizeof fmr_param);
- fmr_param.pool_size = SRP_FMR_POOL_SIZE;
- fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
- fmr_param.cache = 1;
- fmr_param.max_pages_per_fmr = max_pages_per_fmr;
- fmr_param.page_shift = fmr_page_shift;
- fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
- if (!IS_ERR(srp_dev->fmr_pool))
- break;
- }
-
- if (IS_ERR(srp_dev->fmr_pool))
- srp_dev->fmr_pool = NULL;
-
if (device->node_type == RDMA_NODE_IB_SWITCH) {
s = 0;
e = 0;
@@ -2914,8 +3268,6 @@ static void srp_remove_one(struct ib_device *device)
kfree(host);
}
- if (srp_dev->fmr_pool)
- ib_destroy_fmr_pool(srp_dev->fmr_pool);
ib_dereg_mr(srp_dev->mr);
ib_dealloc_pd(srp_dev->pd);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index aad27b7b4a46..e46ecb15aa0d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -66,13 +66,10 @@ enum {
SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = 1U << 31,
- SRP_FMR_SIZE = 512,
- SRP_FMR_MIN_SIZE = 128,
- SRP_FMR_POOL_SIZE = 1024,
- SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4,
+ SRP_MAX_PAGES_PER_MR = 512,
- SRP_MAP_ALLOW_FMR = 0,
- SRP_MAP_NO_FMR = 1,
+ LOCAL_INV_WR_ID_MASK = 1,
+ FAST_REG_WR_ID_MASK = 2,
};
enum srp_target_state {
@@ -86,15 +83,24 @@ enum srp_iu_type {
SRP_IU_RSP,
};
+/*
+ * @mr_page_mask: HCA memory registration page mask.
+ * @mr_page_size: HCA memory registration page size.
+ * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
+ * request.
+ */
struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
struct ib_mr *mr;
- struct ib_fmr_pool *fmr_pool;
- u64 fmr_page_mask;
- int fmr_page_size;
- int fmr_max_size;
+ u64 mr_page_mask;
+ int mr_page_size;
+ int mr_max_size;
+ int max_pages_per_mr;
+ bool has_fmr;
+ bool has_fr;
+ bool use_fast_reg;
};
struct srp_host {
@@ -112,11 +118,14 @@ struct srp_request {
struct list_head list;
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- struct ib_pool_fmr **fmr_list;
+ union {
+ struct ib_pool_fmr **fmr_list;
+ struct srp_fr_desc **fr_list;
+ };
u64 *map_page;
struct srp_direct_buf *indirect_desc;
dma_addr_t indirect_dma_addr;
- short nfmr;
+ short nmdesc;
short index;
};
@@ -131,6 +140,10 @@ struct srp_target_port {
struct ib_cq *send_cq ____cacheline_aligned_in_smp;
struct ib_cq *recv_cq;
struct ib_qp *qp;
+ union {
+ struct ib_fmr_pool *fmr_pool;
+ struct srp_fr_pool *fr_pool;
+ };
u32 lkey;
u32 rkey;
enum srp_target_state state;
@@ -197,15 +210,66 @@ struct srp_iu {
enum dma_data_direction direction;
};
+/**
+ * struct srp_fr_desc - fast registration work request arguments
+ * @entry: Entry in srp_fr_pool.free_list.
+ * @mr: Memory region.
+ * @frpl: Fast registration page list.
+ */
+struct srp_fr_desc {
+ struct list_head entry;
+ struct ib_mr *mr;
+ struct ib_fast_reg_page_list *frpl;
+};
+
+/**
+ * struct srp_fr_pool - pool of fast registration descriptors
+ *
+ * An entry is available for allocation if and only if it occurs in @free_list.
+ *
+ * @size: Number of descriptors in this pool.
+ * @max_page_list_len: Maximum fast registration work request page list length.
+ * @lock: Protects free_list.
+ * @free_list: List of free descriptors.
+ * @desc: Fast registration descriptor pool.
+ */
+struct srp_fr_pool {
+ int size;
+ int max_page_list_len;
+ spinlock_t lock;
+ struct list_head free_list;
+ struct srp_fr_desc desc[0];
+};
+
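The zero-length desc[] array makes the pool header and its descriptors
a single allocation; srp_create_fr_pool() in ib_srp.c sizes it as:

	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);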
+/**
+ * struct srp_map_state - per-request DMA memory mapping state
+ * @desc: Pointer to the element of the SRP buffer descriptor array
+ * that is being filled in.
+ * @pages: Array with DMA addresses of pages being considered for
+ * memory registration.
+ * @base_dma_addr: DMA address of the first page that has not yet been mapped.
+ * @dma_len: Number of bytes that will be registered with the next
+ * FMR or FR memory registration call.
+ * @total_len: Total number of bytes in the sg-list being mapped.
+ * @npages: Number of page addresses in the pages[] array.
+ * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
+ * @ndesc: Number of SRP buffer descriptors that have been filled in.
+ * @unmapped_sg: First element of the sg-list that is mapped via FMR or FR.
+ * @unmapped_index: Index of the first element mapped via FMR or FR.
+ * @unmapped_addr: DMA address of the first element mapped via FMR or FR.
+ */
struct srp_map_state {
- struct ib_pool_fmr **next_fmr;
+ union {
+ struct ib_pool_fmr **next_fmr;
+ struct srp_fr_desc **next_fr;
+ };
struct srp_direct_buf *desc;
u64 *pages;
dma_addr_t base_dma_addr;
- u32 fmr_len;
+ u32 dma_len;
u32 total_len;
unsigned int npages;
- unsigned int nfmr;
+ unsigned int nmdesc;
unsigned int ndesc;
struct scatterlist *unmapped_sg;
int unmapped_index;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index ce953d895f5b..fd325ec9f064 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -629,12 +629,10 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
return copy_to_user(p, str, len) ? -EFAULT : len;
}
-#define OLD_KEY_MAX 0x1ff
static int handle_eviocgbit(struct input_dev *dev,
unsigned int type, unsigned int size,
void __user *p, int compat_mode)
{
- static unsigned long keymax_warn_time;
unsigned long *bits;
int len;
@@ -652,24 +650,8 @@ static int handle_eviocgbit(struct input_dev *dev,
default: return -EINVAL;
}
- /*
- * Work around bugs in userspace programs that like to do
- * EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len'
- * should be in bytes, not in bits.
- */
- if (type == EV_KEY && size == OLD_KEY_MAX) {
- len = OLD_KEY_MAX;
- if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
- pr_warning("(EVIOCGBIT): Suspicious buffer size %u, "
- "limiting output to %zu bytes. See "
- "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
- OLD_KEY_MAX,
- BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
- }
-
return bits_to_user(bits, len, size, p, compat_mode);
}
-#undef OLD_KEY_MAX
static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
{
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 7f161d93203c..3664f81655ca 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -147,6 +147,11 @@ static struct attribute_group input_polldev_attribute_group = {
.attrs = sysfs_attrs
};
+static const struct attribute_group *input_polldev_attribute_groups[] = {
+ &input_polldev_attribute_group,
+ NULL
+};
+
/**
* input_allocate_polled_device - allocate memory for polled device
*
@@ -171,6 +176,91 @@ struct input_polled_dev *input_allocate_polled_device(void)
}
EXPORT_SYMBOL(input_allocate_polled_device);
+struct input_polled_devres {
+ struct input_polled_dev *polldev;
+};
+
+static int devm_input_polldev_match(struct device *dev, void *res, void *data)
+{
+ struct input_polled_devres *devres = res;
+
+ return devres->polldev == data;
+}
+
+static void devm_input_polldev_release(struct device *dev, void *res)
+{
+ struct input_polled_devres *devres = res;
+ struct input_polled_dev *polldev = devres->polldev;
+
+ dev_dbg(dev, "%s: dropping reference/freeing %s\n",
+ __func__, dev_name(&polldev->input->dev));
+
+ input_put_device(polldev->input);
+ kfree(polldev);
+}
+
+static void devm_input_polldev_unregister(struct device *dev, void *res)
+{
+ struct input_polled_devres *devres = res;
+ struct input_polled_dev *polldev = devres->polldev;
+
+ dev_dbg(dev, "%s: unregistering device %s\n",
+ __func__, dev_name(&polldev->input->dev));
+ input_unregister_device(polldev->input);
+
+ /*
+ * Note that we are still holding an extra reference to the input
+ * device, so it will stick around until devm_input_polldev_release()
+ * is called.
+ */
+}
+
+/**
+ * devm_input_allocate_polled_device - allocate managed polled device
+ * @dev: device owning the polled device being created
+ *
+ * Returns prepared &struct input_polled_dev or %NULL.
+ *
+ * Managed polled input devices do not need to be explicitly unregistered
+ * or freed as that is done automatically when the owner device unbinds
+ * from its driver (or binding fails). Once such a managed polled device
+ * is allocated, it is ready to be set up and registered in the same
+ * fashion as regular polled input devices (using
+ * input_register_polled_device() function).
+ *
+ * If you want to manually unregister and free such managed polled devices,
+ * this can still be done by calling input_unregister_polled_device() and
+ * input_free_polled_device(), although it is rarely needed.
+ *
+ * NOTE: the owner device is set up as the parent of the input device
+ * and users should not override it.
+ */
+struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev)
+{
+ struct input_polled_dev *polldev;
+ struct input_polled_devres *devres;
+
+ devres = devres_alloc(devm_input_polldev_release, sizeof(*devres),
+ GFP_KERNEL);
+ if (!devres)
+ return NULL;
+
+ polldev = input_allocate_polled_device();
+ if (!polldev) {
+ devres_free(devres);
+ return NULL;
+ }
+
+ polldev->input->dev.parent = dev;
+ polldev->devres_managed = true;
+
+ devres->polldev = polldev;
+ devres_add(dev, devres);
+
+ return polldev;
+}
+EXPORT_SYMBOL(devm_input_allocate_polled_device);
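A minimal, hypothetical probe showing the intended calling convention
(the driver names and the 100 ms interval are invented):

	static void example_poll(struct input_polled_dev *dev)
	{
		/* Sample the hardware, then report via input_report_*()
		 * and input_sync() on dev->input. */
	}

	static int example_probe(struct platform_device *pdev)
	{
		struct input_polled_dev *poll_dev;

		poll_dev = devm_input_allocate_polled_device(&pdev->dev);
		if (!poll_dev)
			return -ENOMEM;

		poll_dev->poll = example_poll;
		poll_dev->poll_interval = 100;	/* ms */
		poll_dev->input->name = "example polled device";

		/* No remove() counterpart needed: unregistration and
		 * freeing happen automatically on driver unbind. */
		return input_register_polled_device(poll_dev);
	}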
+
/**
* input_free_polled_device - free memory allocated for polled device
* @dev: device to free
@@ -181,7 +271,12 @@ EXPORT_SYMBOL(input_allocate_polled_device);
void input_free_polled_device(struct input_polled_dev *dev)
{
if (dev) {
- input_free_device(dev->input);
+ if (dev->devres_managed)
+ WARN_ON(devres_destroy(dev->input->dev.parent,
+ devm_input_polldev_release,
+ devm_input_polldev_match,
+ dev));
+ input_put_device(dev->input);
kfree(dev);
}
}
@@ -199,26 +294,35 @@ EXPORT_SYMBOL(input_free_polled_device);
*/
int input_register_polled_device(struct input_polled_dev *dev)
{
+ struct input_polled_devres *devres = NULL;
struct input_dev *input = dev->input;
int error;
+ if (dev->devres_managed) {
+ devres = devres_alloc(devm_input_polldev_unregister,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->polldev = dev;
+ }
+
input_set_drvdata(input, dev);
INIT_DELAYED_WORK(&dev->work, input_polled_device_work);
+
if (!dev->poll_interval)
dev->poll_interval = 500;
if (!dev->poll_interval_max)
dev->poll_interval_max = dev->poll_interval;
+
input->open = input_open_polled_device;
input->close = input_close_polled_device;
- error = input_register_device(input);
- if (error)
- return error;
+ input->dev.groups = input_polldev_attribute_groups;
- error = sysfs_create_group(&input->dev.kobj,
- &input_polldev_attribute_group);
+ error = input_register_device(input);
if (error) {
- input_unregister_device(input);
+ devres_free(devres);
return error;
}
@@ -231,6 +335,12 @@ int input_register_polled_device(struct input_polled_dev *dev)
*/
input_get_device(input);
+ if (dev->devres_managed) {
+ dev_dbg(input->dev.parent, "%s: registering %s with devres.\n",
+ __func__, dev_name(&input->dev));
+ devres_add(input->dev.parent, devres);
+ }
+
return 0;
}
EXPORT_SYMBOL(input_register_polled_device);
@@ -245,8 +355,11 @@ EXPORT_SYMBOL(input_register_polled_device);
*/
void input_unregister_polled_device(struct input_polled_dev *dev)
{
- sysfs_remove_group(&dev->input->dev.kobj,
- &input_polldev_attribute_group);
+ if (dev->devres_managed)
+ WARN_ON(devres_destroy(dev->input->dev.parent,
+ devm_input_polldev_unregister,
+ devm_input_polldev_match,
+ dev));
input_unregister_device(dev->input);
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index ffc7ad3a2c88..f7e79b481349 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -524,6 +524,17 @@ config KEYBOARD_STOWAWAY
To compile this driver as a module, choose M here: the
module will be called stowaway.
+config KEYBOARD_ST_KEYSCAN
+ tristate "STMicroelectronics keyscan support"
+ depends on ARCH_STI || COMPILE_TEST
+ select INPUT_MATRIXKMAP
+ help
+ Say Y here if you want to use a keypad attached to the keyscan block
+ on some STMicroelectronics SoC devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called st-keyscan.
+
config KEYBOARD_SUNKBD
tristate "Sun Type 4 and Type 5 keyboard"
select SERIO
@@ -578,7 +589,7 @@ config KEYBOARD_OMAP
config KEYBOARD_OMAP4
tristate "TI OMAP4+ keypad support"
- depends on ARCH_OMAP2PLUS
+ depends on OF || ARCH_OMAP2PLUS
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP4+ keypad.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 11cff7b84b47..7504ae19049d 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
obj-$(CONFIG_KEYBOARD_SPEAR) += spear-keyboard.o
obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
+obj-$(CONFIG_KEYBOARD_ST_KEYSCAN) += st-keyscan.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o
obj-$(CONFIG_KEYBOARD_TEGRA) += tegra-kbc.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index 4cc14c2fa7d5..7f4a8b58efc1 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -12,6 +12,7 @@
#include <linux/input.h>
#include <linux/mfd/adp5520.h>
#include <linux/slab.h>
+#include <linux/device.h>
struct adp5520_keys {
struct input_dev *input;
@@ -81,7 +82,7 @@ static int adp5520_keys_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdata == NULL) {
+ if (!pdata) {
dev_err(&pdev->dev, "missing platform data\n");
return -EINVAL;
}
@@ -89,17 +90,15 @@ static int adp5520_keys_probe(struct platform_device *pdev)
if (!(pdata->rows_en_mask && pdata->cols_en_mask))
return -EINVAL;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
}
- input = input_allocate_device();
- if (!input) {
- ret = -ENOMEM;
- goto err;
- }
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
dev->master = pdev->dev.parent;
dev->input = input;
@@ -135,7 +134,7 @@ static int adp5520_keys_probe(struct platform_device *pdev)
ret = input_register_device(input);
if (ret) {
dev_err(&pdev->dev, "unable to register input device\n");
- goto err;
+ return ret;
}
en_mask = pdata->rows_en_mask | pdata->cols_en_mask;
@@ -157,8 +156,7 @@ static int adp5520_keys_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to write\n");
- ret = -EIO;
- goto err1;
+ return -EIO;
}
dev->notifier.notifier_call = adp5520_keys_notifier;
@@ -166,19 +164,11 @@ static int adp5520_keys_probe(struct platform_device *pdev)
ADP5520_KP_IEN | ADP5520_KR_IEN);
if (ret) {
dev_err(&pdev->dev, "failed to register notifier\n");
- goto err1;
+ return ret;
}
platform_set_drvdata(pdev, dev);
return 0;
-
-err1:
- input_unregister_device(input);
- input = NULL;
-err:
- input_free_device(input);
- kfree(dev);
- return ret;
}
static int adp5520_keys_remove(struct platform_device *pdev)
@@ -188,8 +178,6 @@ static int adp5520_keys_remove(struct platform_device *pdev)
adp5520_unregister_notifier(dev->master, &dev->notifier,
ADP5520_KP_IEN | ADP5520_KR_IEN);
- input_unregister_device(dev->input);
- kfree(dev);
return 0;
}
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index 3955aecee44b..552b65c6e6b0 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -185,7 +185,7 @@ static int clps711x_keypad_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id clps711x_keypad_of_match[] = {
+static const struct of_device_id clps711x_keypad_of_match[] = {
{ .compatible = "cirrus,clps711x-keypad", },
{ }
};
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2db13246eb8e..8c98e97f8e41 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -424,6 +424,16 @@ out:
return IRQ_HANDLED;
}
+static void gpio_keys_quiesce_key(void *data)
+{
+ struct gpio_button_data *bdata = data;
+
+ if (bdata->timer_debounce)
+ del_timer_sync(&bdata->timer);
+
+ cancel_work_sync(&bdata->work);
+}
+
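A hedged reading of the teardown order this patch sets up (see the
devm_add_action() call below; devres releases run in reverse order of
registration):

	/*
	 * devm_gpio_request_one()          registered 1st, released 3rd
	 * devm_add_action(quiesce_key)     registered 2nd, released 2nd
	 * devm_request_any_context_irq()   registered 3rd, released 1st
	 *
	 * The IRQ is therefore freed before the debounce timer and work
	 * item are cancelled, and the GPIO stays valid until both have
	 * quiesced -- the same ordering the old hand-rolled
	 * gpio_remove_key() enforced.
	 */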
static int gpio_keys_setup_key(struct platform_device *pdev,
struct input_dev *input,
struct gpio_button_data *bdata,
@@ -433,7 +443,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
struct device *dev = &pdev->dev;
irq_handler_t isr;
unsigned long irqflags;
- int irq, error;
+ int irq;
+ int error;
bdata->input = input;
bdata->button = button;
@@ -441,7 +452,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
if (gpio_is_valid(button->gpio)) {
- error = gpio_request_one(button->gpio, GPIOF_IN, desc);
+ error = devm_gpio_request_one(&pdev->dev, button->gpio,
+ GPIOF_IN, desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
@@ -463,7 +475,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
dev_err(dev,
"Unable to get irq number for GPIO %d, error %d\n",
button->gpio, error);
- goto fail;
+ return error;
}
bdata->irq = irq;
@@ -497,26 +509,33 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
input_set_capability(input, button->type ?: EV_KEY, button->code);
/*
+ * Install custom action to cancel debounce timer and
+ * workqueue item.
+ */
+ error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to register quiesce action, error: %d\n",
+ error);
+ return error;
+ }
+
+ /*
* If platform has specified that the button can be disabled,
* we don't want it to share the interrupt line.
*/
if (!button->can_disable)
irqflags |= IRQF_SHARED;
- error = request_any_context_irq(bdata->irq, isr, irqflags, desc, bdata);
+ error = devm_request_any_context_irq(&pdev->dev, bdata->irq,
+ isr, irqflags, desc, bdata);
if (error < 0) {
dev_err(dev, "Unable to claim irq %d; error %d\n",
bdata->irq, error);
- goto fail;
+ return error;
}
return 0;
-
-fail:
- if (gpio_is_valid(button->gpio))
- gpio_free(button->gpio);
-
- return error;
}
static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
@@ -578,23 +597,18 @@ gpio_keys_get_devtree_pdata(struct device *dev)
int i;
node = dev->of_node;
- if (!node) {
- error = -ENODEV;
- goto err_out;
- }
+ if (!node)
+ return ERR_PTR(-ENODEV);
nbuttons = of_get_child_count(node);
- if (nbuttons == 0) {
- error = -ENODEV;
- goto err_out;
- }
+ if (nbuttons == 0)
+ return ERR_PTR(-ENODEV);
- pdata = kzalloc(sizeof(*pdata) + nbuttons * (sizeof *button),
- GFP_KERNEL);
- if (!pdata) {
- error = -ENOMEM;
- goto err_out;
- }
+ pdata = devm_kzalloc(dev,
+ sizeof(*pdata) + nbuttons * sizeof(*button),
+ GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
pdata->nbuttons = nbuttons;
@@ -619,7 +633,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
- goto err_free_pdata;
+ return ERR_PTR(error);
}
button = &pdata->buttons[i++];
@@ -630,8 +644,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
button->gpio);
- error = -EINVAL;
- goto err_free_pdata;
+ return ERR_PTR(-EINVAL);
}
button->desc = of_get_property(pp, "label", NULL);
@@ -646,20 +659,13 @@ gpio_keys_get_devtree_pdata(struct device *dev)
button->debounce_interval = 5;
}
- if (pdata->nbuttons == 0) {
- error = -EINVAL;
- goto err_free_pdata;
- }
+ if (pdata->nbuttons == 0)
+ return ERR_PTR(-EINVAL);
return pdata;
-
-err_free_pdata:
- kfree(pdata);
-err_out:
- return ERR_PTR(error);
}
-static struct of_device_id gpio_keys_of_match[] = {
+static const struct of_device_id gpio_keys_of_match[] = {
{ .compatible = "gpio-keys", },
{ },
};
@@ -675,22 +681,13 @@ gpio_keys_get_devtree_pdata(struct device *dev)
#endif
-static void gpio_remove_key(struct gpio_button_data *bdata)
-{
- free_irq(bdata->irq, bdata);
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
- cancel_work_sync(&bdata->work);
- if (gpio_is_valid(bdata->button->gpio))
- gpio_free(bdata->button->gpio);
-}
-
static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
struct gpio_keys_drvdata *ddata;
struct input_dev *input;
+ size_t size;
int i, error;
int wakeup = 0;
@@ -700,14 +697,18 @@ static int gpio_keys_probe(struct platform_device *pdev)
return PTR_ERR(pdata);
}
- ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
- pdata->nbuttons * sizeof(struct gpio_button_data),
- GFP_KERNEL);
- input = input_allocate_device();
- if (!ddata || !input) {
+ size = sizeof(struct gpio_keys_drvdata) +
+ pdata->nbuttons * sizeof(struct gpio_button_data);
+ ddata = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!ddata) {
dev_err(dev, "failed to allocate state\n");
- error = -ENOMEM;
- goto fail1;
+ return -ENOMEM;
+ }
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "failed to allocate input device\n");
+ return -ENOMEM;
}
ddata->pdata = pdata;
@@ -738,7 +739,7 @@ static int gpio_keys_probe(struct platform_device *pdev)
error = gpio_keys_setup_key(pdev, input, bdata, button);
if (error)
- goto fail2;
+ return error;
if (button->wakeup)
wakeup = 1;
@@ -748,57 +749,31 @@ static int gpio_keys_probe(struct platform_device *pdev)
if (error) {
dev_err(dev, "Unable to export keys/switches, error: %d\n",
error);
- goto fail2;
+ return error;
}
error = input_register_device(input);
if (error) {
dev_err(dev, "Unable to register input device, error: %d\n",
error);
- goto fail3;
+ goto err_remove_group;
}
device_init_wakeup(&pdev->dev, wakeup);
return 0;
- fail3:
+err_remove_group:
sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
- fail2:
- while (--i >= 0)
- gpio_remove_key(&ddata->data[i]);
-
- fail1:
- input_free_device(input);
- kfree(ddata);
- /* If we have no platform data, we allocated pdata dynamically. */
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
return error;
}
static int gpio_keys_remove(struct platform_device *pdev)
{
- struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
- struct input_dev *input = ddata->input;
- int i;
-
sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
device_init_wakeup(&pdev->dev, 0);
- for (i = 0; i < ddata->pdata->nbuttons; i++)
- gpio_remove_key(&ddata->data[i]);
-
- input_unregister_device(input);
-
- /* If we have no platform data, we allocated pdata dynamically. */
- if (!dev_get_platdata(&pdev->dev))
- kfree(ddata->pdata);
-
- kfree(ddata);
-
return 0;
}
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index e571e194ff84..432d36395f35 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -120,12 +120,10 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
if (nbuttons == 0)
return NULL;
- pdata = kzalloc(sizeof(*pdata) + nbuttons * (sizeof *button),
- GFP_KERNEL);
- if (!pdata) {
- error = -ENOMEM;
- goto err_out;
- }
+ pdata = devm_kzalloc(dev, sizeof(*pdata) + nbuttons * sizeof(*button),
+ GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
pdata->nbuttons = nbuttons;
@@ -151,7 +149,7 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
- goto err_free_pdata;
+ return ERR_PTR(error);
}
button = &pdata->buttons[i++];
@@ -162,8 +160,7 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
button->gpio);
- error = -EINVAL;
- goto err_free_pdata;
+ return ERR_PTR(-EINVAL);
}
button->desc = of_get_property(pp, "label", NULL);
@@ -178,20 +175,13 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
button->debounce_interval = 5;
}
- if (pdata->nbuttons == 0) {
- error = -EINVAL;
- goto err_free_pdata;
- }
+ if (pdata->nbuttons == 0)
+ return ERR_PTR(-EINVAL);
return pdata;
-
-err_free_pdata:
- kfree(pdata);
-err_out:
- return ERR_PTR(error);
}
-static struct of_device_id gpio_keys_polled_of_match[] = {
+static const struct of_device_id gpio_keys_polled_of_match[] = {
{ .compatible = "gpio-keys-polled", },
{ },
};
@@ -213,6 +203,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
struct gpio_keys_polled_dev *bdev;
struct input_polled_dev *poll_dev;
struct input_dev *input;
+ size_t size;
int error;
int i;
@@ -228,24 +219,21 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
if (!pdata->poll_interval) {
dev_err(dev, "missing poll_interval value\n");
- error = -EINVAL;
- goto err_free_pdata;
+ return -EINVAL;
}
- bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) +
- pdata->nbuttons * sizeof(struct gpio_keys_button_data),
- GFP_KERNEL);
+ size = sizeof(struct gpio_keys_polled_dev) +
+ pdata->nbuttons * sizeof(struct gpio_keys_button_data);
+ bdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!bdev) {
dev_err(dev, "no memory for private data\n");
- error = -ENOMEM;
- goto err_free_pdata;
+ return -ENOMEM;
}
- poll_dev = input_allocate_polled_device();
+ poll_dev = devm_input_allocate_polled_device(&pdev->dev);
if (!poll_dev) {
dev_err(dev, "no memory for polled device\n");
- error = -ENOMEM;
- goto err_free_bdev;
+ return -ENOMEM;
}
poll_dev->private = bdev;
@@ -258,7 +246,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
input->name = pdev->name;
input->phys = DRV_NAME"/input0";
- input->dev.parent = &pdev->dev;
input->id.bustype = BUS_HOST;
input->id.vendor = 0x0001;
@@ -277,16 +264,15 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
if (button->wakeup) {
dev_err(dev, DRV_NAME " does not support wakeup\n");
- error = -EINVAL;
- goto err_free_gpio;
+ return -EINVAL;
}
- error = gpio_request_one(gpio, GPIOF_IN,
- button->desc ?: DRV_NAME);
+ error = devm_gpio_request_one(&pdev->dev, gpio, GPIOF_IN,
+ button->desc ? : DRV_NAME);
if (error) {
dev_err(dev, "unable to claim gpio %u, err=%d\n",
gpio, error);
- goto err_free_gpio;
+ return error;
}
bdata->can_sleep = gpio_cansleep(gpio);
@@ -306,7 +292,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
if (error) {
dev_err(dev, "unable to register polled device, err=%d\n",
error);
- goto err_free_gpio;
+ return error;
}
/* report initial state of the buttons */
@@ -315,52 +301,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
&bdev->data[i]);
return 0;
-
-err_free_gpio:
- while (--i >= 0)
- gpio_free(pdata->buttons[i].gpio);
-
- input_free_polled_device(poll_dev);
-
-err_free_bdev:
- kfree(bdev);
-
-err_free_pdata:
- /* If we have no platform_data, we allocated pdata dynamically. */
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return error;
-}
-
-static int gpio_keys_polled_remove(struct platform_device *pdev)
-{
- struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
- const struct gpio_keys_platform_data *pdata = bdev->pdata;
- int i;
-
- input_unregister_polled_device(bdev->poll_dev);
-
- for (i = 0; i < pdata->nbuttons; i++)
- gpio_free(pdata->buttons[i].gpio);
-
- input_free_polled_device(bdev->poll_dev);
-
- /*
- * If we had no platform_data, we allocated pdata dynamically and
- * must free it here.
- */
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- kfree(bdev);
-
- return 0;
}
static struct platform_driver gpio_keys_polled_driver = {
.probe = gpio_keys_polled_probe,
- .remove = gpio_keys_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 97ec33572e56..8280cb16260b 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -415,7 +415,7 @@ open_err:
}
#ifdef CONFIG_OF
-static struct of_device_id imx_keypad_of_match[] = {
+static const struct of_device_id imx_keypad_of_match[] = {
{ .compatible = "fsl,imx21-kpp", },
{ /* sentinel */ }
};
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 69b1f002ff52..0ba4428da24a 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -16,6 +16,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/device.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
#include <linux/interrupt.h>
@@ -185,14 +186,15 @@ static int jornada680kbd_probe(struct platform_device *pdev)
struct input_dev *input_dev;
int i, error;
- jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL);
+ jornadakbd = devm_kzalloc(&pdev->dev, sizeof(struct jornadakbd),
+ GFP_KERNEL);
if (!jornadakbd)
return -ENOMEM;
- poll_dev = input_allocate_polled_device();
+ poll_dev = devm_input_allocate_polled_device(&pdev->dev);
if (!poll_dev) {
- error = -ENOMEM;
- goto failed;
+ dev_err(&pdev->dev, "failed to allocate polled input device\n");
+ return -ENOMEM;
}
platform_set_drvdata(pdev, jornadakbd);
@@ -224,27 +226,10 @@ static int jornada680kbd_probe(struct platform_device *pdev)
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
error = input_register_polled_device(jornadakbd->poll_dev);
- if (error)
- goto failed;
-
- return 0;
-
- failed:
- printk(KERN_ERR "Jornadakbd: failed to register driver, error: %d\n",
- error);
- input_free_polled_device(poll_dev);
- kfree(jornadakbd);
- return error;
-
-}
-
-static int jornada680kbd_remove(struct platform_device *pdev)
-{
- struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
-
- input_unregister_polled_device(jornadakbd->poll_dev);
- input_free_polled_device(jornadakbd->poll_dev);
- kfree(jornadakbd);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register polled input device\n");
+ return error;
+ }
return 0;
}
@@ -255,7 +240,6 @@ static struct platform_driver jornada680kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada680kbd_probe,
- .remove = jornada680kbd_remove,
};
module_platform_driver(jornada680kbd_driver);
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 1da8e0b44b56..375b05ca8e2a 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -147,7 +147,7 @@ static int mcs_touchkey_probe(struct i2c_client *client,
}
dev_info(&client->dev, "Firmware version: %d\n", fw_ver);
- input_dev->name = "MELPAS MCS Touchkey";
+ input_dev->name = "MELFAS MCS Touchkey";
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY);
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 0400b3f2b4b9..024b7bdffe5b 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -28,11 +28,10 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include <linux/platform_data/omap4-keypad.h>
-
/* OMAP4 registers */
#define OMAP4_KBD_REVISION 0x00
#define OMAP4_KBD_SYSCONFIG 0x10
@@ -218,7 +217,6 @@ static void omap4_keypad_close(struct input_dev *input)
pm_runtime_put_sync(input->dev.parent);
}
-#ifdef CONFIG_OF
static int omap4_keypad_parse_dt(struct device *dev,
struct omap4_keypad *keypad_data)
{
@@ -235,20 +233,9 @@ static int omap4_keypad_parse_dt(struct device *dev,
return 0;
}
-#else
-static inline int omap4_keypad_parse_dt(struct device *dev,
- struct omap4_keypad *keypad_data)
-{
- return -ENOSYS;
-}
-#endif
static int omap4_keypad_probe(struct platform_device *pdev)
{
- const struct omap4_keypad_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- const struct matrix_keymap_data *keymap_data =
- pdata ? pdata->keymap_data : NULL;
struct omap4_keypad *keypad_data;
struct input_dev *input_dev;
struct resource *res;
@@ -277,14 +264,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
keypad_data->irq = irq;
- if (pdata) {
- keypad_data->rows = pdata->rows;
- keypad_data->cols = pdata->cols;
- } else {
- error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
- if (error)
- return error;
- }
+ error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
+ if (error)
+ return error;
res = request_mem_region(res->start, resource_size(res), pdev->name);
if (!res) {
@@ -363,7 +345,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
goto err_free_input;
}
- error = matrix_keypad_build_keymap(keymap_data, NULL,
+ error = matrix_keypad_build_keymap(NULL, NULL,
keypad_data->rows, keypad_data->cols,
keypad_data->keymap, input_dev);
if (error) {
@@ -434,13 +416,11 @@ static int omap4_keypad_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id omap_keypad_dt_match[] = {
{ .compatible = "ti,omap4-keypad" },
{},
};
MODULE_DEVICE_TABLE(of, omap_keypad_dt_match);
-#endif
#ifdef CONFIG_PM_SLEEP
static int omap4_keypad_suspend(struct device *dev)
@@ -482,7 +462,7 @@ static struct platform_driver omap4_keypad_driver = {
.name = "omap4-keypad",
.owner = THIS_MODULE,
.pm = &omap4_keypad_pm_ops,
- .of_match_table = of_match_ptr(omap_keypad_dt_match),
+ .of_match_table = omap_keypad_dt_match,
},
};
module_platform_driver(omap4_keypad_driver);
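
Dropping the platform-data branch leaves omap4-keypad DT-only: the row/column counts come from matrix_keypad_parse_of_params(), and passing a NULL keymap_data to matrix_keypad_build_keymap() makes it fall back to the node's "linux,keymap" property. A hedged sketch of that path (the foo_* names are illustrative):

	#include <linux/input/matrix_keypad.h>

	static int foo_build_dt_keymap(struct platform_device *pdev,
				       struct input_dev *input,
				       unsigned short *keymap)
	{
		unsigned int rows, cols;
		int error;

		/* Reads "keypad,num-rows" / "keypad,num-columns" */
		error = matrix_keypad_parse_of_params(&pdev->dev,
						      &rows, &cols);
		if (error)
			return error;

		/* NULL keymap_data: the keymap is decoded from the
		 * node's "linux,keymap" property instead. */
		return matrix_keypad_build_keymap(NULL, NULL, rows, cols,
						  keymap, input);
	}
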
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
new file mode 100644
index 000000000000..758b48731415
--- /dev/null
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -0,0 +1,274 @@
+/*
+ * STMicroelectronics Key Scanning driver
+ *
+ * Copyright (c) 2014 STMicroelectronics Ltd.
+ * Author: Stuart Menefy <stuart.menefy@st.com>
+ *
+ * Based on sh_keysc.c, copyright 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/input/matrix_keypad.h>
+
+#define ST_KEYSCAN_MAXKEYS 16
+
+#define KEYSCAN_CONFIG_OFF 0x0
+#define KEYSCAN_CONFIG_ENABLE 0x1
+#define KEYSCAN_DEBOUNCE_TIME_OFF 0x4
+#define KEYSCAN_MATRIX_STATE_OFF 0x8
+#define KEYSCAN_MATRIX_DIM_OFF 0xc
+#define KEYSCAN_MATRIX_DIM_X_SHIFT 0x0
+#define KEYSCAN_MATRIX_DIM_Y_SHIFT 0x2
+
+struct st_keyscan {
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+ struct input_dev *input_dev;
+ unsigned long last_state;
+ unsigned int n_rows;
+ unsigned int n_cols;
+ unsigned int debounce_us;
+};
+
+static irqreturn_t keyscan_isr(int irq, void *dev_id)
+{
+ struct st_keyscan *keypad = dev_id;
+ unsigned short *keycode = keypad->input_dev->keycode;
+ unsigned long state, change;
+ int bit_nr;
+
+ state = readl(keypad->base + KEYSCAN_MATRIX_STATE_OFF) & 0xffff;
+ change = keypad->last_state ^ state;
+ keypad->last_state = state;
+
+ for_each_set_bit(bit_nr, &change, BITS_PER_LONG)
+ input_report_key(keypad->input_dev,
+ keycode[bit_nr], state & BIT(bit_nr));
+
+ input_sync(keypad->input_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int keyscan_start(struct st_keyscan *keypad)
+{
+ int error;
+
+ error = clk_enable(keypad->clk);
+ if (error)
+ return error;
+
+ writel(keypad->debounce_us * (clk_get_rate(keypad->clk) / 1000000),
+ keypad->base + KEYSCAN_DEBOUNCE_TIME_OFF);
+
+ writel(((keypad->n_cols - 1) << KEYSCAN_MATRIX_DIM_X_SHIFT) |
+ ((keypad->n_rows - 1) << KEYSCAN_MATRIX_DIM_Y_SHIFT),
+ keypad->base + KEYSCAN_MATRIX_DIM_OFF);
+
+ writel(KEYSCAN_CONFIG_ENABLE, keypad->base + KEYSCAN_CONFIG_OFF);
+
+ return 0;
+}
+
+static void keyscan_stop(struct st_keyscan *keypad)
+{
+ writel(0, keypad->base + KEYSCAN_CONFIG_OFF);
+
+ clk_disable(keypad->clk);
+}
+
+static int keyscan_open(struct input_dev *dev)
+{
+ struct st_keyscan *keypad = input_get_drvdata(dev);
+
+ return keyscan_start(keypad);
+}
+
+static void keyscan_close(struct input_dev *dev)
+{
+ struct st_keyscan *keypad = input_get_drvdata(dev);
+
+ keyscan_stop(keypad);
+}
+
+static int keypad_matrix_key_parse_dt(struct st_keyscan *keypad_data)
+{
+ struct device *dev = keypad_data->input_dev->dev.parent;
+ struct device_node *np = dev->of_node;
+ int error;
+
+ error = matrix_keypad_parse_of_params(dev, &keypad_data->n_rows,
+ &keypad_data->n_cols);
+ if (error) {
+ dev_err(dev, "failed to parse keypad params\n");
+ return error;
+ }
+
+ of_property_read_u32(np, "st,debounce-us", &keypad_data->debounce_us);
+
+ dev_dbg(dev, "n_rows=%d n_cols=%d debounce=%d\n",
+ keypad_data->n_rows, keypad_data->n_cols,
+ keypad_data->debounce_us);
+
+ return 0;
+}
+
+static int keyscan_probe(struct platform_device *pdev)
+{
+ struct st_keyscan *keypad_data;
+ struct input_dev *input_dev;
+ struct resource *res;
+ int error;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "no DT data present\n");
+ return -EINVAL;
+ }
+
+ keypad_data = devm_kzalloc(&pdev->dev, sizeof(*keypad_data),
+ GFP_KERNEL);
+ if (!keypad_data)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "failed to allocate the input device\n");
+ return -ENOMEM;
+ }
+
+ input_dev->name = pdev->name;
+ input_dev->phys = "keyscan-keys/input0";
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->open = keyscan_open;
+ input_dev->close = keyscan_close;
+
+ input_dev->id.bustype = BUS_HOST;
+
+ error = keypad_matrix_key_parse_dt(keypad_data);
+ if (error)
+ return error;
+
+ error = matrix_keypad_build_keymap(NULL, NULL,
+ keypad_data->n_rows,
+ keypad_data->n_cols,
+ NULL, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ return error;
+ }
+
+ input_set_drvdata(input_dev, keypad_data);
+
+ keypad_data->input_dev = input_dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(keypad_data->base))
+ return PTR_ERR(keypad_data->base);
+
+ keypad_data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(keypad_data->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(keypad_data->clk);
+ }
+
+ error = clk_enable(keypad_data->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return error;
+ }
+
+ keyscan_stop(keypad_data);
+
+ keypad_data->irq = platform_get_irq(pdev, 0);
+ if (keypad_data->irq < 0) {
+ dev_err(&pdev->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ error = devm_request_irq(&pdev->dev, keypad_data->irq, keyscan_isr, 0,
+ pdev->name, keypad_data);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return error;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ platform_set_drvdata(pdev, keypad_data);
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ return 0;
+}
+
+static int keyscan_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct st_keyscan *keypad = platform_get_drvdata(pdev);
+ struct input_dev *input = keypad->input_dev;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(keypad->irq);
+ else if (input->users)
+ keyscan_stop(keypad);
+
+ mutex_unlock(&input->mutex);
+ return 0;
+}
+
+static int keyscan_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct st_keyscan *keypad = platform_get_drvdata(pdev);
+ struct input_dev *input = keypad->input_dev;
+ int retval = 0;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(keypad->irq);
+ else if (input->users)
+ retval = keyscan_start(keypad);
+
+ mutex_unlock(&input->mutex);
+ return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
+
+static const struct of_device_id keyscan_of_match[] = {
+ { .compatible = "st,sti-keyscan" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, keyscan_of_match);
+
+static struct platform_driver keyscan_device_driver = {
+ .probe = keyscan_probe,
+ .driver = {
+ .name = "st-keyscan",
+ .pm = &keyscan_dev_pm_ops,
+ .of_match_table = of_match_ptr(keyscan_of_match),
+ }
+};
+
+module_platform_driver(keyscan_device_driver);
+
+MODULE_AUTHOR("Stuart Menefy <stuart.menefy@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics keyscan device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 74494a357522..ad7abae69078 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -296,6 +296,65 @@ static void tc3589x_keypad_close(struct input_dev *input)
tc3589x_keypad_disable(keypad);
}
+#ifdef CONFIG_OF
+static const struct tc3589x_keypad_platform_data *
+tc3589x_keypad_of_probe(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct tc3589x_keypad_platform_data *plat;
+ u32 cols, rows;
+ u32 debounce_ms;
+ int proplen;
+
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32(np, "keypad,num-columns", &cols);
+ of_property_read_u32(np, "keypad,num-rows", &rows);
+ plat->kcol = (u8) cols;
+ plat->krow = (u8) rows;
+ if (!plat->krow || !plat->kcol ||
+ plat->krow > TC_KPD_ROWS || plat->kcol > TC_KPD_COLUMNS) {
+ dev_err(dev,
+ "keypad columns/rows not properly specified (%ux%u)\n",
+ plat->kcol, plat->krow);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!of_get_property(np, "linux,keymap", &proplen)) {
+ dev_err(dev, "property linux,keymap not found\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ plat->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
+ plat->enable_wakeup = of_property_read_bool(np, "linux,wakeup");
+
+ /* The custom delay format is ms/16 */
+ of_property_read_u32(np, "debounce-delay-ms", &debounce_ms);
+ if (debounce_ms)
+ plat->debounce_period = debounce_ms * 16;
+ else
+ plat->debounce_period = TC_KPD_DEBOUNCE_PERIOD;
+
+ plat->settle_time = TC_KPD_SETTLE_TIME;
+ /* FIXME: should be property of the IRQ resource? */
+ plat->irqtype = IRQF_TRIGGER_FALLING;
+
+ return plat;
+}
+#else
+static inline const struct tc3589x_keypad_platform_data *
+tc3589x_keypad_of_probe(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+
static int tc3589x_keypad_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
@@ -306,8 +365,11 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
plat = tc3589x->pdata->keypad;
if (!plat) {
- dev_err(&pdev->dev, "invalid keypad platform data\n");
- return -EINVAL;
+ plat = tc3589x_keypad_of_probe(&pdev->dev);
+ if (IS_ERR(plat)) {
+ dev_err(&pdev->dev, "invalid keypad platform data\n");
+ return PTR_ERR(plat);
+ }
}
irq = platform_get_irq(pdev, 0);
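
tc3589x_keypad_of_probe() above follows the kernel's error-pointer convention: a single return value carries either a valid pointer or an errno encoded with ERR_PTR(), and the caller separates the two cases with IS_ERR()/PTR_ERR(). A minimal self-contained sketch (the foo_* names are hypothetical):

	#include <linux/err.h>

	struct foo_pdata { u8 krow, kcol; };	/* illustrative */

	static struct foo_pdata *foo_of_probe(struct device *dev)
	{
		struct foo_pdata *plat;

		if (!dev->of_node)
			return ERR_PTR(-ENODEV); /* errno in the pointer */

		plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return ERR_PTR(-ENOMEM);

		/* ... parse properties into plat ... */
		return plat;			/* plain pointer on success */
	}

	static int foo_probe(struct device *dev)
	{
		struct foo_pdata *plat = foo_of_probe(dev);

		if (IS_ERR(plat))
			return PTR_ERR(plat);	/* decode the errno again */
		return 0;
	}
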
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index abd8453e5212..220ce0fa15d9 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
+#include <linux/device.h>
#define PM8607_WAKEUP 0x0b
@@ -68,7 +69,8 @@ static int pm860x_onkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = kzalloc(sizeof(struct pm860x_onkey_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_onkey_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
info->chip = chip;
@@ -76,11 +78,10 @@ static int pm860x_onkey_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->irq = irq;
- info->idev = input_allocate_device();
+ info->idev = devm_input_allocate_device(&pdev->dev);
if (!info->idev) {
dev_err(chip->dev, "Failed to allocate input dev\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
info->idev->name = "88pm860x_on";
@@ -93,42 +94,22 @@ static int pm860x_onkey_probe(struct platform_device *pdev)
ret = input_register_device(info->idev);
if (ret) {
dev_err(chip->dev, "Can't register input device: %d\n", ret);
- goto out_reg;
+ return ret;
}
- ret = request_threaded_irq(info->irq, NULL, pm860x_onkey_handler,
- IRQF_ONESHOT, "onkey", info);
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ pm860x_onkey_handler, IRQF_ONESHOT,
+ "onkey", info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
info->irq, ret);
- goto out_irq;
+ return ret;
}
platform_set_drvdata(pdev, info);
device_init_wakeup(&pdev->dev, 1);
return 0;
-
-out_irq:
- input_unregister_device(info->idev);
- kfree(info);
- return ret;
-
-out_reg:
- input_free_device(info->idev);
-out:
- kfree(info);
- return ret;
-}
-
-static int pm860x_onkey_remove(struct platform_device *pdev)
-{
- struct pm860x_onkey_info *info = platform_get_drvdata(pdev);
-
- free_irq(info->irq, info);
- input_unregister_device(info->idev);
- kfree(info);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -161,7 +142,6 @@ static struct platform_driver pm860x_onkey_driver = {
.pm = &pm860x_onkey_pm_ops,
},
.probe = pm860x_onkey_probe,
- .remove = pm860x_onkey_remove,
};
module_platform_driver(pm860x_onkey_driver);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 5928ea71dd69..2ff4425a893b 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -224,7 +224,7 @@ config INPUT_GP2A
config INPUT_GPIO_BEEPER
tristate "Generic GPIO Beeper support"
- depends on OF_GPIO
+ depends on GPIOLIB
help
Say Y here if you have a beeper connected to a GPIO pin.
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index f2fbdd88ed20..95ef7dd6442d 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -7,6 +7,7 @@
* AB8500 Power-On Key handler
*/
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -65,12 +66,14 @@ static int ab8500_ponkey_probe(struct platform_device *pdev)
return irq_dbr;
}
- ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
- input = input_allocate_device();
- if (!ponkey || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ ponkey = devm_kzalloc(&pdev->dev, sizeof(struct ab8500_ponkey),
+ GFP_KERNEL);
+ if (!ponkey)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
ponkey->idev = input;
ponkey->ab8500 = ab8500;
@@ -82,52 +85,32 @@ static int ab8500_ponkey_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbf", ponkey);
+ error = devm_request_any_context_irq(&pdev->dev, ponkey->irq_dbf,
+ ab8500_ponkey_handler, 0,
+ "ab8500-ponkey-dbf", ponkey);
if (error < 0) {
dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
ponkey->irq_dbf, error);
- goto err_free_mem;
+ return error;
}
- error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbr", ponkey);
+ error = devm_request_any_context_irq(&pdev->dev, ponkey->irq_dbr,
+ ab8500_ponkey_handler, 0,
+ "ab8500-ponkey-dbr", ponkey);
if (error < 0) {
dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
ponkey->irq_dbr, error);
- goto err_free_dbf_irq;
+ return error;
}
error = input_register_device(ponkey->idev);
if (error) {
dev_err(ab8500->dev, "Can't register input device: %d\n", error);
- goto err_free_dbr_irq;
+ return error;
}
platform_set_drvdata(pdev, ponkey);
return 0;
-
-err_free_dbr_irq:
- free_irq(ponkey->irq_dbr, ponkey);
-err_free_dbf_irq:
- free_irq(ponkey->irq_dbf, ponkey);
-err_free_mem:
- input_free_device(input);
- kfree(ponkey);
-
- return error;
-}
-
-static int ab8500_ponkey_remove(struct platform_device *pdev)
-{
- struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
-
- free_irq(ponkey->irq_dbf, ponkey);
- free_irq(ponkey->irq_dbr, ponkey);
- input_unregister_device(ponkey->idev);
- kfree(ponkey);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -144,7 +127,6 @@ static struct platform_driver ab8500_ponkey_driver = {
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
- .remove = ab8500_ponkey_remove,
};
module_platform_driver(ab8500_ponkey_driver);
diff --git a/drivers/input/misc/gpio-beeper.c b/drivers/input/misc/gpio-beeper.c
index b757435e2b3d..8886af63eae3 100644
--- a/drivers/input/misc/gpio-beeper.c
+++ b/drivers/input/misc/gpio-beeper.c
@@ -1,7 +1,7 @@
/*
* Generic GPIO beeper driver
*
- * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2013-2014 Alexander Shiyan <shc_work@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,7 +11,8 @@
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
@@ -19,14 +20,13 @@
struct gpio_beeper {
struct work_struct work;
- int gpio;
- bool active_low;
+ struct gpio_desc *desc;
bool beeping;
};
static void gpio_beeper_toggle(struct gpio_beeper *beep, bool on)
{
- gpio_set_value_cansleep(beep->gpio, on ^ beep->active_low);
+ gpiod_set_value_cansleep(beep->desc, on);
}
static void gpio_beeper_work(struct work_struct *work)
@@ -65,18 +65,16 @@ static void gpio_beeper_close(struct input_dev *input)
static int gpio_beeper_probe(struct platform_device *pdev)
{
struct gpio_beeper *beep;
- enum of_gpio_flags flags;
struct input_dev *input;
- unsigned long gflags;
int err;
beep = devm_kzalloc(&pdev->dev, sizeof(*beep), GFP_KERNEL);
if (!beep)
return -ENOMEM;
- beep->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
- if (!gpio_is_valid(beep->gpio))
- return beep->gpio;
+ beep->desc = devm_gpiod_get(&pdev->dev, NULL);
+ if (IS_ERR(beep->desc))
+ return PTR_ERR(beep->desc);
input = devm_input_allocate_device(&pdev->dev);
if (!input)
@@ -94,10 +92,7 @@ static int gpio_beeper_probe(struct platform_device *pdev)
input_set_capability(input, EV_SND, SND_BELL);
- beep->active_low = flags & OF_GPIO_ACTIVE_LOW;
- gflags = beep->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
-
- err = devm_gpio_request_one(&pdev->dev, beep->gpio, gflags, pdev->name);
+ err = gpiod_direction_output(beep->desc, 0);
if (err)
return err;
@@ -106,17 +101,19 @@ static int gpio_beeper_probe(struct platform_device *pdev)
return input_register_device(input);
}
-static struct of_device_id gpio_beeper_of_match[] = {
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_beeper_of_match[] = {
{ .compatible = BEEPER_MODNAME, },
{ }
};
MODULE_DEVICE_TABLE(of, gpio_beeper_of_match);
+#endif
static struct platform_driver gpio_beeper_platform_driver = {
.driver = {
.name = BEEPER_MODNAME,
.owner = THIS_MODULE,
- .of_match_table = gpio_beeper_of_match,
+ .of_match_table = of_match_ptr(gpio_beeper_of_match),
},
.probe = gpio_beeper_probe,
};
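
The gpio-beeper rework swaps the integer-GPIO API for descriptors, which moves polarity handling into gpiolib: the OF_GPIO_ACTIVE_LOW bookkeeping disappears because gpiod_set_value_cansleep() takes a logical "asserted" value and gpiolib applies the active-low flag from the firmware description. A sketch of the acquisition side, assuming the 3.16-era two-argument devm_gpiod_get() (foo_* names illustrative):

	#include <linux/gpio/consumer.h>

	static struct gpio_desc *foo_get_beeper_gpio(struct device *dev)
	{
		struct gpio_desc *desc;
		int err;

		desc = devm_gpiod_get(dev, NULL); /* first GPIO of node */
		if (IS_ERR(desc))
			return desc;

		/* "0" is the logical level: gpiolib inverts it when the
		 * firmware marked the line active-low. */
		err = gpiod_direction_output(desc, 0);
		if (err)
			return ERR_PTR(err);

		return desc;
	}
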
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 5a736397d9c8..719410feb84b 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1566,6 +1566,7 @@ static int ims_pcu_buffers_alloc(struct ims_pcu *pcu)
if (!pcu->urb_ctrl_buf) {
dev_err(pcu->dev,
"Failed to allocate memory for read buffer\n");
+ error = -ENOMEM;
goto err_free_urb_out_buf;
}
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index eef41cfc054d..3809618e6a5d 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/max8925.h>
#include <linux/slab.h>
+#include <linux/device.h>
#define SW_INPUT (1 << 7) /* 0/1 -- up/down */
#define HARDRESET_EN (1 << 7)
@@ -81,12 +82,14 @@ static int max8925_onkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
- input = input_allocate_device();
- if (!info || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_onkey_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
info->idev = input;
info->i2c = chip->i2c;
@@ -100,55 +103,34 @@ static int max8925_onkey_probe(struct platform_device *pdev)
input->dev.parent = &pdev->dev;
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq[0], NULL, max8925_onkey_handler,
- IRQF_ONESHOT, "onkey-down", info);
+ error = devm_request_threaded_irq(&pdev->dev, irq[0], NULL,
+ max8925_onkey_handler, IRQF_ONESHOT,
+ "onkey-down", info);
if (error < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
irq[0], error);
- goto err_free_mem;
+ return error;
}
- error = request_threaded_irq(irq[1], NULL, max8925_onkey_handler,
- IRQF_ONESHOT, "onkey-up", info);
+ error = devm_request_threaded_irq(&pdev->dev, irq[1], NULL,
+ max8925_onkey_handler, IRQF_ONESHOT,
+ "onkey-up", info);
if (error < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
irq[1], error);
- goto err_free_irq0;
+ return error;
}
error = input_register_device(info->idev);
if (error) {
dev_err(chip->dev, "Can't register input device: %d\n", error);
- goto err_free_irq1;
+ return error;
}
platform_set_drvdata(pdev, info);
device_init_wakeup(&pdev->dev, 1);
return 0;
-
-err_free_irq1:
- free_irq(irq[1], info);
-err_free_irq0:
- free_irq(irq[0], info);
-err_free_mem:
- input_free_device(input);
- kfree(info);
-
- return error;
-}
-
-static int max8925_onkey_remove(struct platform_device *pdev)
-{
- struct max8925_onkey_info *info = platform_get_drvdata(pdev);
- struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
-
- free_irq(info->irq[0] + chip->irq_base, info);
- free_irq(info->irq[1] + chip->irq_base, info);
- input_unregister_device(info->idev);
- kfree(info);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -190,7 +172,6 @@ static struct platform_driver max8925_onkey_driver = {
.pm = &max8925_onkey_pm_ops,
},
.probe = max8925_onkey_probe,
- .remove = max8925_onkey_remove,
};
module_platform_driver(max8925_onkey_driver);
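
Both IRQ requests above pass a NULL primary handler, so the IRQF_ONESHOT flag they carry is not optional: without it request_threaded_irq() refuses the request with -EINVAL, since the line must stay masked until the handler thread has finished. A sketch with illustrative foo_* names:

	#include <linux/interrupt.h>

	static irqreturn_t foo_irq_thread(int irq, void *dev_id)
	{
		/* all handling happens here, in process context */
		return IRQ_HANDLED;
	}

	static int foo_request(struct platform_device *pdev, int irq,
			       void *priv)
	{
		/* NULL hard handler: IRQF_ONESHOT keeps the line masked
		 * until foo_irq_thread() has completed. */
		return devm_request_threaded_irq(&pdev->dev, irq, NULL,
						 foo_irq_thread,
						 IRQF_ONESHOT,
						 "foo", priv);
	}
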
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 1fea5484941f..a363ebbd9cc0 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -181,11 +181,21 @@ static void max8997_haptic_enable(struct max8997_haptic *chip)
}
if (!chip->enabled) {
- chip->enabled = true;
- regulator_enable(chip->regulator);
+ error = regulator_enable(chip->regulator);
+ if (error) {
+ dev_err(chip->dev, "Failed to enable regulator\n");
+ goto out;
+ }
max8997_haptic_configure(chip);
- if (chip->mode == MAX8997_EXTERNAL_MODE)
- pwm_enable(chip->pwm);
+ if (chip->mode == MAX8997_EXTERNAL_MODE) {
+ error = pwm_enable(chip->pwm);
+ if (error) {
+ dev_err(chip->dev, "Failed to enable PWM\n");
+ regulator_disable(chip->regulator);
+ goto out;
+ }
+ }
+ chip->enabled = true;
}
out:
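
The max8997-haptic fix above is the usual commit-last ordering: each resource is enabled in turn, a later failure unwinds the earlier ones in reverse, and the enabled flag is set only once everything succeeded, so the cached state can never claim more than the hardware actually did. A condensed sketch (foo_* names illustrative):

	#include <linux/pwm.h>
	#include <linux/regulator/consumer.h>

	struct foo_chip {			/* illustrative */
		struct regulator *regulator;
		struct pwm_device *pwm;
		bool enabled;
	};

	static int foo_enable(struct foo_chip *chip)
	{
		int error;

		error = regulator_enable(chip->regulator);
		if (error)
			return error;

		error = pwm_enable(chip->pwm);
		if (error) {
			regulator_disable(chip->regulator); /* unwind */
			return error;
		}

		chip->enabled = true;	/* commit only on full success */
		return 0;
	}
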
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 1cb8fda7a166..c91e3d33aea9 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -92,15 +92,15 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
bool pull_up;
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
- kpd_delay = 0;
+ kpd_delay = 15625;
- pull_up = of_property_read_bool(pdev->dev.of_node, "pull-up");
-
- if (kpd_delay > 62500) {
+ if (kpd_delay > 62500 || kpd_delay == 0) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
+ pull_up = of_property_read_bool(pdev->dev.of_node, "pull-up");
+
regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!regmap) {
dev_err(&pdev->dev, "failed to locate regmap for the device\n");
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 99b9e42aa748..93558a1c7f70 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -143,7 +143,7 @@ static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
}
#ifdef CONFIG_OF
-static struct of_device_id rotary_encoder_of_match[] = {
+static const struct of_device_id rotary_encoder_of_match[] = {
{ .compatible = "rotary-encoder", },
{ },
};
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 20c80f543d5e..5a6334be30b8 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -17,7 +17,6 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
-#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/pnp.h>
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 77dc23b94eb1..6d26eecc278c 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -262,7 +262,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
struct vibra_info *info;
int vddvibl_uV = 0;
int vddvibr_uV = 0;
- int ret;
+ int error;
twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
"vibra");
@@ -309,12 +309,12 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
mutex_init(&info->mutex);
- ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
- twl6040_vib_irq_handler, 0,
- "twl6040_irq_vib", info);
- if (ret) {
- dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
- return ret;
+ error = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ twl6040_vib_irq_handler, 0,
+ "twl6040_irq_vib", info);
+ if (error) {
+ dev_err(info->dev, "VIB IRQ request failed: %d\n", error);
+ return error;
}
info->supplies[0].supply = "vddvibl";
@@ -323,40 +323,40 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
* When booted with Device tree the regulators are attached to the
* parent device (twl6040 MFD core)
*/
- ret = regulator_bulk_get(twl6040_core_dev, ARRAY_SIZE(info->supplies),
- info->supplies);
- if (ret) {
- dev_err(info->dev, "couldn't get regulators %d\n", ret);
- return ret;
+ error = devm_regulator_bulk_get(twl6040_core_dev,
+ ARRAY_SIZE(info->supplies),
+ info->supplies);
+ if (error) {
+ dev_err(info->dev, "couldn't get regulators %d\n", error);
+ return error;
}
if (vddvibl_uV) {
- ret = regulator_set_voltage(info->supplies[0].consumer,
- vddvibl_uV, vddvibl_uV);
- if (ret) {
+ error = regulator_set_voltage(info->supplies[0].consumer,
+ vddvibl_uV, vddvibl_uV);
+ if (error) {
dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
- ret);
- goto err_regulator;
+ error);
+ return error;
}
}
if (vddvibr_uV) {
- ret = regulator_set_voltage(info->supplies[1].consumer,
- vddvibr_uV, vddvibr_uV);
- if (ret) {
+ error = regulator_set_voltage(info->supplies[1].consumer,
+ vddvibr_uV, vddvibr_uV);
+ if (error) {
dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
- ret);
- goto err_regulator;
+ error);
+ return error;
}
}
INIT_WORK(&info->play_work, vibra_play_work);
- info->input_dev = input_allocate_device();
- if (info->input_dev == NULL) {
+ info->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!info->input_dev) {
dev_err(info->dev, "couldn't allocate input device\n");
- ret = -ENOMEM;
- goto err_regulator;
+ return -ENOMEM;
}
input_set_drvdata(info->input_dev, info);
@@ -367,44 +367,25 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
info->input_dev->close = twl6040_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
- ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
- if (ret < 0) {
+ error = input_ff_create_memless(info->input_dev, NULL, vibra_play);
+ if (error) {
dev_err(info->dev, "couldn't register vibrator to FF\n");
- goto err_ialloc;
+ return error;
}
- ret = input_register_device(info->input_dev);
- if (ret < 0) {
+ error = input_register_device(info->input_dev);
+ if (error) {
dev_err(info->dev, "couldn't register input device\n");
- goto err_iff;
+ return error;
}
platform_set_drvdata(pdev, info);
return 0;
-
-err_iff:
- input_ff_destroy(info->input_dev);
-err_ialloc:
- input_free_device(info->input_dev);
-err_regulator:
- regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
- return ret;
-}
-
-static int twl6040_vibra_remove(struct platform_device *pdev)
-{
- struct vibra_info *info = platform_get_drvdata(pdev);
-
- input_unregister_device(info->input_dev);
- regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
-
- return 0;
}
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
- .remove = twl6040_vibra_remove,
.driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 6b8441f7bc32..366fc7ad5eb6 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -53,7 +53,7 @@ config MOUSE_PS2_LOGIPS2PP
default y
depends on MOUSE_PS2
help
- Say Y here if you have a Logictech PS/2++ mouse connected to
+ Say Y here if you have a Logitech PS/2++ mouse connected to
your system.
If unsure, say Y.
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b96e978a37b7..ee2a04d90d20 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -473,8 +473,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+
+ /* For clickpads map both buttons to BTN_LEFT */
+ if (etd->fw_version & 0x001000) {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ }
+
input_report_abs(dev, ABS_PRESSURE, pres);
input_report_abs(dev, ABS_TOOL_WIDTH, width);
@@ -484,10 +491,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
static void elantech_input_sync_v4(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ /* For clickpads map both buttons to BTN_LEFT */
+ if (etd->fw_version & 0x001000) {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ }
+
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
@@ -835,7 +849,7 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
if (etd->set_hw_resolution)
etd->reg_10 = 0x0b;
else
- etd->reg_10 = 0x03;
+ etd->reg_10 = 0x01;
if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
rc = -1;
@@ -1336,7 +1350,8 @@ static int elantech_reconnect(struct psmouse *psmouse)
}
/*
- * Some hw_version 3 models go into error state when we try to set bit 3 of r10
+ * Some hw_version 3 models go into error state when we try to set
+ * bit 3 and/or bit 1 of r10.
*/
static const struct dmi_system_id no_hw_res_dmi_table[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
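
The elantech change keys off bit 12 (0x001000) of the firmware version, which flags a clickpad; there the firmware reports both button bits for the single physical button, so the driver folds packet[0] & 0x03 into BTN_LEFT instead of splitting left and right. A sketch of the helper this logic could be factored into (foo_report_buttons is hypothetical):

	#include <linux/input.h>

	static void foo_report_buttons(struct input_dev *dev,
				       u32 fw_version, u8 status)
	{
		if (fw_version & 0x001000) {
			/* clickpad: both bits act as the one button */
			input_report_key(dev, BTN_LEFT, status & 0x03);
		} else {
			input_report_key(dev, BTN_LEFT, status & 0x01);
			input_report_key(dev, BTN_RIGHT, status & 0x02);
		}
	}
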
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c5ec703c727e..ec772d962f06 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -347,15 +347,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
unsigned char resp[3];
int i;
- for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
- if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
- priv->x_min = min_max_pnpid_table[i].x_min;
- priv->x_max = min_max_pnpid_table[i].x_max;
- priv->y_min = min_max_pnpid_table[i].y_min;
- priv->y_max = min_max_pnpid_table[i].y_max;
- return 0;
- }
-
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -366,6 +357,16 @@ static int synaptics_resolution(struct psmouse *psmouse)
}
}
+ for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
+ if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
+ priv->x_min = min_max_pnpid_table[i].x_min;
+ priv->x_max = min_max_pnpid_table[i].x_max;
+ priv->y_min = min_max_pnpid_table[i].y_min;
+ priv->y_max = min_max_pnpid_table[i].y_max;
+ return 0;
+ }
+ }
+
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 17e01a807ddc..98be824544a5 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -203,7 +203,7 @@ static int apbps2_of_remove(struct platform_device *of_dev)
return 0;
}
-static struct of_device_id apbps2_of_match[] = {
+static const struct of_device_id apbps2_of_match[] = {
{ .name = "GAISLER_APBPS2", },
{ .name = "01_060", },
{}
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index 5d2fe7ece7ca..d906f3ebc8c8 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -262,7 +262,7 @@ static int olpc_apsp_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id olpc_apsp_dt_ids[] = {
+static const struct of_device_id olpc_apsp_dt_ids[] = {
{ .compatible = "olpc,ap-sp", },
{}
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 611fc3905d00..2c613cd41dd6 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -349,6 +349,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
break;
case MTTPC:
+ case MTTPC_B:
features->pktlen = WACOM_PKGLEN_MTTPC;
break;
@@ -380,6 +381,16 @@ static int wacom_parse_hid(struct usb_interface *intf,
i += 12;
break;
+ case MTTPC_B:
+ features->x_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->x_phy =
+ get_unaligned_le16(&report[i + 6]);
+ features->unit = report[i - 5];
+ features->unitExpo = report[i - 3];
+ i += 9;
+ break;
+
default:
features->x_max =
get_unaligned_le16(&report[i + 3]);
@@ -430,6 +441,14 @@ static int wacom_parse_hid(struct usb_interface *intf,
i += 12;
break;
+ case MTTPC_B:
+ features->y_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->y_phy =
+ get_unaligned_le16(&report[i + 6]);
+ i += 9;
+ break;
+
default:
features->y_max =
features->x_max;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 4822c57a3756..977d05cd9e2e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -484,6 +484,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
input_report_key(input, BTN_TOUCH, 0);
input_report_abs(input, ABS_PRESSURE, 0);
input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
+ wacom->shared->stylus_in_proximity = true;
}
/* Exit report */
@@ -928,12 +930,12 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int t_x = le16_to_cpup((__le16 *)&data[offset + 2]);
- int c_x = le16_to_cpup((__le16 *)&data[offset + 4]);
- int t_y = le16_to_cpup((__le16 *)&data[offset + 6]);
- int c_y = le16_to_cpup((__le16 *)&data[offset + 8]);
- int w = le16_to_cpup((__le16 *)&data[offset + 10]);
- int h = le16_to_cpup((__le16 *)&data[offset + 12]);
+ int t_x = get_unaligned_le16(&data[offset + 2]);
+ int c_x = get_unaligned_le16(&data[offset + 4]);
+ int t_y = get_unaligned_le16(&data[offset + 6]);
+ int c_y = get_unaligned_le16(&data[offset + 8]);
+ int w = get_unaligned_le16(&data[offset + 10]);
+ int h = get_unaligned_le16(&data[offset + 12]);
input_report_abs(input, ABS_MT_POSITION_X, t_x);
input_report_abs(input, ABS_MT_POSITION_Y, t_y);
@@ -962,7 +964,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
int x_offset = 0;
/* MTTPC does not support Height and Width */
- if (wacom->features.type == MTTPC)
+ if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
x_offset = -4;
/*
@@ -978,7 +980,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
for (i = 0; i < contacts_to_send; i++) {
int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
bool touch = data[offset] & 0x1;
- int id = le16_to_cpup((__le16 *)&data[offset + 1]);
+ int id = get_unaligned_le16(&data[offset + 1]);
int slot = input_mt_get_slot_by_key(input, id);
if (slot < 0)
@@ -987,8 +989,8 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int x = le16_to_cpup((__le16 *)&data[offset + x_offset + 7]);
- int y = le16_to_cpup((__le16 *)&data[offset + x_offset + 9]);
+ int x = get_unaligned_le16(&data[offset + x_offset + 7]);
+ int y = get_unaligned_le16(&data[offset + x_offset + 9]);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
@@ -1047,6 +1049,10 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
prox = data[0] & 0x01;
x = get_unaligned_le16(&data[1]);
y = get_unaligned_le16(&data[3]);
+ } else if (len == WACOM_PKGLEN_TPC1FG_B) {
+ prox = data[2] & 0x01;
+ x = get_unaligned_le16(&data[3]);
+ y = get_unaligned_le16(&data[5]);
} else {
prox = data[1] & 0x01;
x = le16_to_cpup((__le16 *)&data[2]);
@@ -1110,6 +1116,9 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
case WACOM_PKGLEN_TPC2FG:
return wacom_tpc_mt_touch(wacom);
+ case WACOM_PKGLEN_PENABLED:
+ return wacom_tpc_pen(wacom);
+
default:
switch (data[0]) {
case WACOM_REPORT_TPC1FG:
@@ -1119,6 +1128,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return wacom_tpc_single_touch(wacom, len);
case WACOM_REPORT_TPCMT:
+ case WACOM_REPORT_TPCMT2:
return wacom_mt_touch(wacom);
case WACOM_REPORT_PENABLED:
@@ -1461,6 +1471,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case TABLETPC2FG:
case MTSCREEN:
case MTTPC:
+ case MTTPC_B:
sync = wacom_tpc_irq(wacom_wac, len);
break;
@@ -1565,10 +1576,10 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
struct wacom_features *features = &wacom_wac->features;
if (features->device_type == BTN_TOOL_PEN) {
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
- features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, features->x_min,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, features->y_min,
+ features->y_max, features->y_fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0,
features->pressure_max, features->pressure_fuzz, 0);
@@ -1802,6 +1813,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case MTSCREEN:
case MTTPC:
+ case MTTPC_B:
case TABLETPC2FG:
if (features->device_type == BTN_TOOL_FINGER) {
unsigned int flags = INPUT_MT_DIRECT;
@@ -2123,11 +2135,11 @@ static const struct wacom_features wacom_features_0x317 =
63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
.touch_max = 16 };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
- 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104280, 65400, 2047,
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */
- 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104280, 65400, 2047, /* Pen */
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
@@ -2142,8 +2154,8 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023,
63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59552, 33848, 1023,
- 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59352, 33648, 1023,
+ 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0xC7 =
{ "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511,
0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2157,24 +2169,24 @@ static const struct wacom_features wacom_features_0xFB =
{ "Wacom DTU1031", WACOM_PKGLEN_DTUS, 22096, 13960, 511,
0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
- 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
+ { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
+ 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
- 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
+ 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047,
- 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87000, 65400, 2047,
+ 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
- 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
+ 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
- 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
+ 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -2233,9 +2245,21 @@ static const struct wacom_features wacom_features_0x10E =
static const struct wacom_features wacom_features_0x10F =
{ "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x116 =
+ { "Wacom ISDv4 116", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
+ 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x4001 =
{ "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x4004 =
+ { "Wacom ISDv4 4004", WACOM_PKGLEN_MTTPC, 11060, 6220, 255,
+ 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x5000 =
+ { "Wacom ISDv4 5000", WACOM_PKGLEN_MTTPC, 27848, 15752, 1023,
+ 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x5002 =
+ { "Wacom ISDv4 5002", WACOM_PKGLEN_MTTPC, 29576, 16724, 1023,
+ 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2316,8 +2340,8 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x0307 =
- { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59552, 33848, 2047,
- 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59352, 33648, 2047,
+ 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x0309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
@@ -2447,6 +2471,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x10D) },
{ USB_DEVICE_WACOM(0x10E) },
{ USB_DEVICE_WACOM(0x10F) },
+ { USB_DEVICE_WACOM(0x116) },
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
{ USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) },
@@ -2457,6 +2482,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x4001) },
+ { USB_DEVICE_WACOM(0x4004) },
+ { USB_DEVICE_WACOM(0x5000) },
+ { USB_DEVICE_WACOM(0x5002) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
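
The le16_to_cpup() replacements above are a portability fix: casting &data[offset] to __le16 * and loading through it assumes 2-byte alignment, which the touch report buffer does not guarantee, and strict-alignment architectures can fault on odd offsets. get_unaligned_le16() assembles the value safely byte-by-byte where required. A minimal sketch:

	#include <asm/unaligned.h>

	static u16 foo_read_coord(const u8 *data, size_t offset)
	{
		/* Safe for any offset parity; by contrast,
		 * le16_to_cpup((__le16 *)&data[offset]) performs an
		 * aligned 16-bit load and can fault on CPUs that do
		 * not tolerate unaligned access. */
		return get_unaligned_le16(&data[offset]);
	}
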
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index f69c0ebe7fa9..b2c9a9c1b551 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -22,6 +22,7 @@
#define WACOM_PKGLEN_BBFUN 9
#define WACOM_PKGLEN_INTUOS 10
#define WACOM_PKGLEN_TPC1FG 5
+#define WACOM_PKGLEN_TPC1FG_B 10
#define WACOM_PKGLEN_TPC2FG 14
#define WACOM_PKGLEN_BBTOUCH 20
#define WACOM_PKGLEN_BBTOUCH3 64
@@ -30,6 +31,7 @@
#define WACOM_PKGLEN_MTOUCH 62
#define WACOM_PKGLEN_MTTPC 40
#define WACOM_PKGLEN_DTUS 68
+#define WACOM_PKGLEN_PENABLED 8
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
@@ -52,6 +54,7 @@
#define WACOM_REPORT_TPC1FG 6
#define WACOM_REPORT_TPC2FG 13
#define WACOM_REPORT_TPCMT 13
+#define WACOM_REPORT_TPCMT2 3
#define WACOM_REPORT_TPCHID 15
#define WACOM_REPORT_TPCST 16
#define WACOM_REPORT_DTUS 17
@@ -105,6 +108,7 @@ enum {
TABLETPC2FG,
MTSCREEN,
MTTPC,
+ MTTPC_B,
MAX_TYPE
};
@@ -118,6 +122,8 @@ struct wacom_features {
int type;
int x_resolution;
int y_resolution;
+ int x_min;
+ int y_min;
int device_type;
int x_phy;
int y_phy;
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 544e20c551f8..0d4a9fad4a78 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -16,6 +16,7 @@
#include <linux/input.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
+#include <linux/device.h>
#define MEAS_LEN (8)
#define ACCURATE_BIT (12)
@@ -234,16 +235,17 @@ static int pm860x_touch_probe(struct platform_device *pdev)
if (ret)
return ret;
- touch = kzalloc(sizeof(struct pm860x_touch), GFP_KERNEL);
- if (touch == NULL)
+ touch = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_touch),
+ GFP_KERNEL);
+ if (!touch)
return -ENOMEM;
+
platform_set_drvdata(pdev, touch);
- touch->idev = input_allocate_device();
- if (touch->idev == NULL) {
+ touch->idev = devm_input_allocate_device(&pdev->dev);
+ if (!touch->idev) {
dev_err(&pdev->dev, "Failed to allocate input device!\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
touch->idev->name = "88pm860x-touch";
@@ -258,10 +260,11 @@ static int pm860x_touch_probe(struct platform_device *pdev)
touch->res_x = res_x;
input_set_drvdata(touch->idev, touch);
- ret = request_threaded_irq(touch->irq, NULL, pm860x_touch_handler,
- IRQF_ONESHOT, "touch", touch);
+ ret = devm_request_threaded_irq(&pdev->dev, touch->irq, NULL,
+ pm860x_touch_handler, IRQF_ONESHOT,
+ "touch", touch);
if (ret < 0)
- goto out_irq;
+ return ret;
__set_bit(EV_ABS, touch->idev->evbit);
__set_bit(ABS_X, touch->idev->absbit);
@@ -279,28 +282,11 @@ static int pm860x_touch_probe(struct platform_device *pdev)
ret = input_register_device(touch->idev);
if (ret < 0) {
dev_err(chip->dev, "Failed to register touch!\n");
- goto out_rg;
+ return ret;
}
platform_set_drvdata(pdev, touch);
return 0;
-out_rg:
- free_irq(touch->irq, touch);
-out_irq:
- input_free_device(touch->idev);
-out:
- kfree(touch);
- return ret;
-}
-
-static int pm860x_touch_remove(struct platform_device *pdev)
-{
- struct pm860x_touch *touch = platform_get_drvdata(pdev);
-
- input_unregister_device(touch->idev);
- free_irq(touch->irq, touch);
- kfree(touch);
- return 0;
}
static struct platform_driver pm860x_touch_driver = {
@@ -309,7 +295,6 @@ static struct platform_driver pm860x_touch_driver = {
.owner = THIS_MODULE,
},
.probe = pm860x_touch_probe,
- .remove = pm860x_touch_remove,
};
module_platform_driver(pm860x_touch_driver);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index d4e5ab57909f..a23a94bb4bcb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,6 +11,10 @@ menuconfig INPUT_TOUCHSCREEN
if INPUT_TOUCHSCREEN
+config OF_TOUCHSCREEN
+ def_tristate INPUT
+ depends on INPUT && OF
+
config TOUCHSCREEN_88PM860X
tristate "Marvell 88PM860x touchscreen"
depends on MFD_88PM860X
@@ -89,6 +93,7 @@ config TOUCHSCREEN_AD7879_SPI
config TOUCHSCREEN_ATMEL_MXT
tristate "Atmel mXT I2C Touchscreen"
depends on I2C
+ select FW_LOADER
help
Say Y here if you have Atmel mXT series I2C touchscreen,
such as AT42QT602240/ATMXT224, connected to your system.
@@ -846,7 +851,7 @@ config TOUCHSCREEN_TSC2007
config TOUCHSCREEN_W90X900
tristate "W90P910 touchscreen driver"
- depends on HAVE_CLK
+ depends on ARCH_W90X900
help
Say Y here if you have a W90P910 based touchscreen.
@@ -885,6 +890,17 @@ config TOUCHSCREEN_STMPE
To compile this driver as a module, choose M here: the
module will be called stmpe-ts.
+config TOUCHSCREEN_SUN4I
+ tristate "Allwinner sun4i resistive touchscreen controller support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HWMON
+ help
+ This selects support for the resistive touchscreen controller
+ found on Allwinner sunxi SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun4i-ts.
+
config TOUCHSCREEN_SUR40
tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
depends on USB
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 03f12a1f2218..126479d8c29a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,6 +6,7 @@
wm97xx-ts-y := wm97xx-core.o
+obj-$(CONFIG_OF_TOUCHSCREEN) += of_touchscreen.o
obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o
obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
@@ -53,6 +54,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
+obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 6793c85903ae..523865daa1d3 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -210,11 +210,6 @@ static bool gpio3;
module_param(gpio3, bool, 0);
MODULE_PARM_DESC(gpio3, "If gpio3 is set to 1 AUX3 acts as GPIO3");
-/*
- * ad7877_read/write are only used for initial setup and for sysfs controls.
- * The main traffic is done using spi_async() in the interrupt handler.
- */
-
static int ad7877_read(struct spi_device *spi, u16 reg)
{
struct ser_req *req;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 7f8aa981500d..da201b8e37dc 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -706,7 +706,7 @@ static void ads7846_read_state(struct ads7846 *ts)
m = &ts->msg[msg_idx];
error = spi_sync(ts->spi, m);
if (error) {
- dev_err(&ts->spi->dev, "spi_async --> %d\n", error);
+ dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
packet->tc.ignore = true;
return;
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index a70400754e92..6e0b4a2120d3 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2,6 +2,8 @@
* Atmel maXTouch Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Copyright (C) 2012 Google, Inc.
+ *
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -12,6 +14,8 @@
*/
#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -25,12 +29,6 @@
#define MXT_VER_21 21
#define MXT_VER_22 22
-/* Slave addresses */
-#define MXT_APP_LOW 0x4a
-#define MXT_APP_HIGH 0x4b
-#define MXT_BOOT_LOW 0x24
-#define MXT_BOOT_HIGH 0x25
-
/* Firmware */
#define MXT_FW_NAME "maxtouch.fw"
@@ -83,6 +81,9 @@
#define MXT_COMMAND_REPORTALL 3
#define MXT_COMMAND_DIAGNOSTIC 5
+/* Define for T6 status byte */
+#define MXT_T6_STATUS_RESET (1 << 7)
+
/* MXT_GEN_POWER_T7 field */
#define MXT_POWER_IDLEACQINT 0
#define MXT_POWER_ACTVACQINT 1
@@ -99,33 +100,26 @@
/* MXT_TOUCH_MULTI_T9 field */
#define MXT_TOUCH_CTRL 0
-#define MXT_TOUCH_XORIGIN 1
-#define MXT_TOUCH_YORIGIN 2
-#define MXT_TOUCH_XSIZE 3
-#define MXT_TOUCH_YSIZE 4
-#define MXT_TOUCH_BLEN 6
-#define MXT_TOUCH_TCHTHR 7
-#define MXT_TOUCH_TCHDI 8
-#define MXT_TOUCH_ORIENT 9
-#define MXT_TOUCH_MOVHYSTI 11
-#define MXT_TOUCH_MOVHYSTN 12
-#define MXT_TOUCH_NUMTOUCH 14
-#define MXT_TOUCH_MRGHYST 15
-#define MXT_TOUCH_MRGTHR 16
-#define MXT_TOUCH_AMPHYST 17
-#define MXT_TOUCH_XRANGE_LSB 18
-#define MXT_TOUCH_XRANGE_MSB 19
-#define MXT_TOUCH_YRANGE_LSB 20
-#define MXT_TOUCH_YRANGE_MSB 21
-#define MXT_TOUCH_XLOCLIP 22
-#define MXT_TOUCH_XHICLIP 23
-#define MXT_TOUCH_YLOCLIP 24
-#define MXT_TOUCH_YHICLIP 25
-#define MXT_TOUCH_XEDGECTRL 26
-#define MXT_TOUCH_XEDGEDIST 27
-#define MXT_TOUCH_YEDGECTRL 28
-#define MXT_TOUCH_YEDGEDIST 29
-#define MXT_TOUCH_JUMPLIMIT 30
+#define MXT_T9_ORIENT 9
+#define MXT_T9_RANGE 18
+
+/* MXT_TOUCH_MULTI_T9 status */
+#define MXT_T9_UNGRIP (1 << 0)
+#define MXT_T9_SUPPRESS (1 << 1)
+#define MXT_T9_AMP (1 << 2)
+#define MXT_T9_VECTOR (1 << 3)
+#define MXT_T9_MOVE (1 << 4)
+#define MXT_T9_RELEASE (1 << 5)
+#define MXT_T9_PRESS (1 << 6)
+#define MXT_T9_DETECT (1 << 7)
+
+struct t9_range {
+ u16 x;
+ u16 y;
+} __packed;
+
+/* MXT_TOUCH_MULTI_T9 orient */
+#define MXT_T9_ORIENT_SWITCH (1 << 0)
/* MXT_PROCI_GRIPFACE_T20 field */
#define MXT_GRIPFACE_CTRL 0
@@ -174,17 +168,16 @@
/* Define for MXT_GEN_COMMAND_T6 */
#define MXT_BOOT_VALUE 0xa5
+#define MXT_RESET_VALUE 0x01
#define MXT_BACKUP_VALUE 0x55
+
+/* Delay times */
#define MXT_BACKUP_TIME 50 /* msec */
#define MXT_RESET_TIME 200 /* msec */
-
-#define MXT_FWRESET_TIME 175 /* msec */
-
-/* MXT_SPT_GPIOPWM_T19 field */
-#define MXT_GPIO0_MASK 0x04
-#define MXT_GPIO1_MASK 0x08
-#define MXT_GPIO2_MASK 0x10
-#define MXT_GPIO3_MASK 0x20
+#define MXT_RESET_TIMEOUT 3000 /* msec */
+#define MXT_CRC_TIMEOUT 1000 /* msec */
+#define MXT_FW_RESET_TIME 3000 /* msec */
+#define MXT_FW_CHG_TIMEOUT 300 /* msec */
/* Command to unlock bootloader */
#define MXT_UNLOCK_CMD_MSB 0xaa
@@ -198,21 +191,8 @@
#define MXT_FRAME_CRC_PASS 0x04
#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
#define MXT_BOOT_STATUS_MASK 0x3f
-
-/* Touch status */
-#define MXT_UNGRIP (1 << 0)
-#define MXT_SUPPRESS (1 << 1)
-#define MXT_AMP (1 << 2)
-#define MXT_VECTOR (1 << 3)
-#define MXT_MOVE (1 << 4)
-#define MXT_RELEASE (1 << 5)
-#define MXT_PRESS (1 << 6)
-#define MXT_DETECT (1 << 7)
-
-/* Touch orient bits */
-#define MXT_XY_SWITCH (1 << 0)
-#define MXT_X_INVERT (1 << 1)
-#define MXT_Y_INVERT (1 << 2)
+#define MXT_BOOT_EXTENDED_ID (1 << 5)
+#define MXT_BOOT_ID_MASK 0x1f
/* Touchscreen absolute values */
#define MXT_MAX_AREA 0xff
@@ -232,8 +212,8 @@ struct mxt_info {
struct mxt_object {
u8 type;
u16 start_address;
- u8 size; /* Size of each instance - 1 */
- u8 instances; /* Number of instances - 1 */
+ u8 size_minus_one;
+ u8 instances_minus_one;
u8 num_report_ids;
} __packed;
@@ -250,19 +230,40 @@ struct mxt_data {
const struct mxt_platform_data *pdata;
struct mxt_object *object_table;
struct mxt_info info;
- bool is_tp;
-
unsigned int irq;
unsigned int max_x;
unsigned int max_y;
+ bool in_bootloader;
+ u32 config_crc;
+ u8 bootloader_addr;
/* Cached parameters from object table */
u8 T6_reportid;
+ u16 T6_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
u8 T19_reportid;
+
+ /* for fw update in bootloader */
+ struct completion bl_completion;
+
+ /* for reset handling */
+ struct completion reset_completion;
+
+ /* for config update handling */
+ struct completion crc_completion;
};
+static size_t mxt_obj_size(const struct mxt_object *obj)
+{
+ return obj->size_minus_one + 1;
+}
+
+static size_t mxt_obj_instances(const struct mxt_object *obj)
+{
+ return obj->instances_minus_one + 1;
+}
+
static bool mxt_object_readable(unsigned int type)
{
switch (type) {
@@ -334,60 +335,190 @@ static void mxt_dump_message(struct device *dev,
message->reportid, 7, message->message);
}
-static int mxt_check_bootloader(struct i2c_client *client,
- unsigned int state)
+static int mxt_wait_for_completion(struct mxt_data *data,
+ struct completion *comp,
+ unsigned int timeout_ms)
+{
+ struct device *dev = &data->client->dev;
+ unsigned long timeout = msecs_to_jiffies(timeout_ms);
+ long ret;
+
+ ret = wait_for_completion_interruptible_timeout(comp, timeout);
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ dev_err(dev, "Wait for completion timed out.\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int mxt_bootloader_read(struct mxt_data *data,
+ u8 *val, unsigned int count)
+{
+ int ret;
+ struct i2c_msg msg;
+
+ msg.addr = data->bootloader_addr;
+ msg.flags = data->client->flags & I2C_M_TEN;
+ msg.flags |= I2C_M_RD;
+ msg.len = count;
+ msg.buf = val;
+
+ ret = i2c_transfer(data->client->adapter, &msg, 1);
+
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ ret = ret < 0 ? ret : -EIO;
+ dev_err(&data->client->dev, "%s: i2c recv failed (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static int mxt_bootloader_write(struct mxt_data *data,
+ const u8 * const val, unsigned int count)
+{
+ int ret;
+ struct i2c_msg msg;
+
+ msg.addr = data->bootloader_addr;
+ msg.flags = data->client->flags & I2C_M_TEN;
+ msg.len = count;
+ msg.buf = (u8 *)val;
+
+ ret = i2c_transfer(data->client->adapter, &msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ ret = ret < 0 ? ret : -EIO;
+ dev_err(&data->client->dev, "%s: i2c send failed (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static int mxt_lookup_bootloader_address(struct mxt_data *data)
+{
+ u8 appmode = data->client->addr;
+ u8 bootloader;
+
+ switch (appmode) {
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ case 0x5a:
+ case 0x5b:
+ bootloader = appmode - 0x26;
+ break;
+ default:
+ dev_err(&data->client->dev,
+ "Appmode i2c address 0x%02x not found\n",
+ appmode);
+ return -EINVAL;
+ }
+
+ data->bootloader_addr = bootloader;
+ return 0;
+}
+
+static u8 mxt_get_bootloader_version(struct mxt_data *data, u8 val)
+{
+ struct device *dev = &data->client->dev;
+ u8 buf[3];
+
+ if (val & MXT_BOOT_EXTENDED_ID) {
+ if (mxt_bootloader_read(data, &buf[0], 3) != 0) {
+ dev_err(dev, "%s: i2c failure\n", __func__);
+ return val;
+ }
+
+ dev_dbg(dev, "Bootloader ID:%d Version:%d\n", buf[1], buf[2]);
+
+ return buf[0];
+ } else {
+ dev_dbg(dev, "Bootloader ID:%d\n", val & MXT_BOOT_ID_MASK);
+
+ return val;
+ }
+}
+
+static int mxt_check_bootloader(struct mxt_data *data, unsigned int state)
{
+ struct device *dev = &data->client->dev;
u8 val;
+ int ret;
recheck:
- if (i2c_master_recv(client, &val, 1) != 1) {
- dev_err(&client->dev, "%s: i2c recv failed\n", __func__);
- return -EIO;
+ if (state != MXT_WAITING_BOOTLOAD_CMD) {
+ /*
+ * In application update mode, the interrupt
+ * line signals state transitions. We must wait for the
+ * CHG assertion before reading the status byte.
+ * Once the status byte has been read, the line is deasserted.
+ */
+ ret = mxt_wait_for_completion(data, &data->bl_completion,
+ MXT_FW_CHG_TIMEOUT);
+ if (ret) {
+ /*
+ * TODO: handle -ERESTARTSYS better by terminating
+ * fw update process before returning to userspace
+ * by writing length 0x000 to device (iff we are in
+ * WAITING_FRAME_DATA state).
+ */
+ dev_err(dev, "Update wait error %d\n", ret);
+ return ret;
+ }
}
+ ret = mxt_bootloader_read(data, &val, 1);
+ if (ret)
+ return ret;
+
+ if (state == MXT_WAITING_BOOTLOAD_CMD)
+ val = mxt_get_bootloader_version(data, val);
+
switch (state) {
case MXT_WAITING_BOOTLOAD_CMD:
case MXT_WAITING_FRAME_DATA:
val &= ~MXT_BOOT_STATUS_MASK;
break;
case MXT_FRAME_CRC_PASS:
- if (val == MXT_FRAME_CRC_CHECK)
+ if (val == MXT_FRAME_CRC_CHECK) {
goto recheck;
+ } else if (val == MXT_FRAME_CRC_FAIL) {
+ dev_err(dev, "Bootloader CRC fail\n");
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
}
if (val != state) {
- dev_err(&client->dev, "Unvalid bootloader mode state\n");
+ dev_err(dev, "Invalid bootloader state %02X != %02X\n",
+ val, state);
return -EINVAL;
}
return 0;
}
-static int mxt_unlock_bootloader(struct i2c_client *client)
+static int mxt_unlock_bootloader(struct mxt_data *data)
{
+ int ret;
u8 buf[2];
buf[0] = MXT_UNLOCK_CMD_LSB;
buf[1] = MXT_UNLOCK_CMD_MSB;
- if (i2c_master_send(client, buf, 2) != 2) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int mxt_fw_write(struct i2c_client *client,
- const u8 *data, unsigned int frame_size)
-{
- if (i2c_master_send(client, data, frame_size) != frame_size) {
- dev_err(&client->dev, "%s: i2c send failed\n", __func__);
- return -EIO;
- }
+ ret = mxt_bootloader_write(data, buf, 2);
+ if (ret)
+ return ret;
return 0;
}
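[Editor's note] The new mxt_lookup_bootloader_address() replaces the removed fixed MXT_APP_*/MXT_BOOT_* slave addresses with arithmetic: every supported application-mode address sits 0x26 above its bootloader address, so 0x4a maps to 0x24 and 0x4b to 0x25, exactly the pairs the old constants encoded. A standalone check of that invariant (illustrative snippet, not part of the driver):

	/* Illustrative only: verify the appmode -> bootloader offset used above. */
	static const u8 appmode_addrs[] = { 0x4a, 0x4b, 0x4c, 0x4d, 0x5a, 0x5b };
	int i;

	for (i = 0; i < ARRAY_SIZE(appmode_addrs); i++)
		pr_debug("app 0x%02x -> boot 0x%02x\n",
			 appmode_addrs[i], appmode_addrs[i] - 0x26);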
@@ -427,11 +558,6 @@ static int __mxt_read_reg(struct i2c_client *client,
return ret;
}
-static int mxt_read_reg(struct i2c_client *client, u16 reg, u8 *val)
-{
- return __mxt_read_reg(client, reg, 1, val);
-}
-
static int __mxt_write_reg(struct i2c_client *client, u16 reg, u16 len,
const void *val)
{
@@ -479,7 +605,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
return object;
}
- dev_err(&data->client->dev, "Invalid object type\n");
+ dev_err(&data->client->dev, "Invalid object type T%u\n", type);
return NULL;
}
@@ -505,7 +631,7 @@ static int mxt_write_object(struct mxt_data *data,
u16 reg;
object = mxt_get_object(data, type);
- if (!object || offset >= object->size + 1)
+ if (!object || offset >= mxt_obj_size(object))
return -EINVAL;
reg = object->start_address;
@@ -515,18 +641,25 @@ static int mxt_write_object(struct mxt_data *data,
static void mxt_input_button(struct mxt_data *data, struct mxt_message *message)
{
struct input_dev *input = data->input_dev;
+ const struct mxt_platform_data *pdata = data->pdata;
bool button;
int i;
/* Active-low switch */
- for (i = 0; i < MXT_NUM_GPIO; i++) {
- if (data->pdata->key_map[i] == KEY_RESERVED)
+ for (i = 0; i < pdata->t19_num_keys; i++) {
+ if (pdata->t19_keymap[i] == KEY_RESERVED)
continue;
- button = !(message->message[0] & MXT_GPIO0_MASK << i);
- input_report_key(input, data->pdata->key_map[i], button);
+ button = !(message->message[0] & (1 << i));
+ input_report_key(input, pdata->t19_keymap[i], button);
}
}
+static void mxt_input_sync(struct input_dev *input_dev)
+{
+ input_mt_report_pointer_emulation(input_dev, false);
+ input_sync(input_dev);
+}
+
static void mxt_input_touchevent(struct mxt_data *data,
struct mxt_message *message, int id)
{
@@ -536,44 +669,60 @@ static void mxt_input_touchevent(struct mxt_data *data,
int x;
int y;
int area;
- int pressure;
+ int amplitude;
x = (message->message[1] << 4) | ((message->message[3] >> 4) & 0xf);
y = (message->message[2] << 4) | ((message->message[3] & 0xf));
+
+ /* Handle 10/12 bit switching */
if (data->max_x < 1024)
- x = x >> 2;
+ x >>= 2;
if (data->max_y < 1024)
- y = y >> 2;
+ y >>= 2;
area = message->message[4];
- pressure = message->message[5];
+ amplitude = message->message[5];
dev_dbg(dev,
"[%u] %c%c%c%c%c%c%c%c x: %5u y: %5u area: %3u amp: %3u\n",
id,
- (status & MXT_DETECT) ? 'D' : '.',
- (status & MXT_PRESS) ? 'P' : '.',
- (status & MXT_RELEASE) ? 'R' : '.',
- (status & MXT_MOVE) ? 'M' : '.',
- (status & MXT_VECTOR) ? 'V' : '.',
- (status & MXT_AMP) ? 'A' : '.',
- (status & MXT_SUPPRESS) ? 'S' : '.',
- (status & MXT_UNGRIP) ? 'U' : '.',
- x, y, area, pressure);
+ (status & MXT_T9_DETECT) ? 'D' : '.',
+ (status & MXT_T9_PRESS) ? 'P' : '.',
+ (status & MXT_T9_RELEASE) ? 'R' : '.',
+ (status & MXT_T9_MOVE) ? 'M' : '.',
+ (status & MXT_T9_VECTOR) ? 'V' : '.',
+ (status & MXT_T9_AMP) ? 'A' : '.',
+ (status & MXT_T9_SUPPRESS) ? 'S' : '.',
+ (status & MXT_T9_UNGRIP) ? 'U' : '.',
+ x, y, area, amplitude);
input_mt_slot(input_dev, id);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- status & MXT_DETECT);
- if (status & MXT_DETECT) {
+ if (status & MXT_T9_DETECT) {
+ /*
+ * Multiple bits may be set if the host is slow to read
+ * the status messages, indicating all the events that
+ * have happened.
+ */
+ if (status & MXT_T9_RELEASE) {
+ input_mt_report_slot_state(input_dev,
+ MT_TOOL_FINGER, 0);
+ mxt_input_sync(input_dev);
+ }
+
+ /* Touch active */
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1);
input_report_abs(input_dev, ABS_MT_POSITION_X, x);
input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
- input_report_abs(input_dev, ABS_MT_PRESSURE, pressure);
+ input_report_abs(input_dev, ABS_MT_PRESSURE, amplitude);
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
+ } else {
+ /* Touch no longer active, close out slot */
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
}
}
-static unsigned mxt_extract_T6_csum(const u8 *csum)
+static u16 mxt_extract_T6_csum(const u8 *csum)
{
return csum[0] | (csum[1] << 8) | (csum[2] << 16);
}
@@ -584,28 +733,37 @@ static bool mxt_is_T9_message(struct mxt_data *data, struct mxt_message *msg)
return (id >= data->T9_reportid_min && id <= data->T9_reportid_max);
}
-static irqreturn_t mxt_interrupt(int irq, void *dev_id)
+static irqreturn_t mxt_process_messages_until_invalid(struct mxt_data *data)
{
- struct mxt_data *data = dev_id;
struct mxt_message message;
const u8 *payload = &message.message[0];
struct device *dev = &data->client->dev;
u8 reportid;
bool update_input = false;
+ u32 crc;
do {
if (mxt_read_message(data, &message)) {
dev_err(dev, "Failed to read message\n");
- goto end;
+ return IRQ_NONE;
}
reportid = message.reportid;
if (reportid == data->T6_reportid) {
u8 status = payload[0];
- unsigned csum = mxt_extract_T6_csum(&payload[1]);
+
+ crc = mxt_extract_T6_csum(&payload[1]);
+ if (crc != data->config_crc) {
+ data->config_crc = crc;
+ complete(&data->crc_completion);
+ }
+
dev_dbg(dev, "Status: %02x Config Checksum: %06x\n",
- status, csum);
+ status, data->config_crc);
+
+ if (status & MXT_T6_STATUS_RESET)
+ complete(&data->reset_completion);
} else if (mxt_is_T9_message(data, &message)) {
int id = reportid - data->T9_reportid_min;
mxt_input_touchevent(data, &message, id);
@@ -618,15 +776,96 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
}
} while (reportid != 0xff);
- if (update_input) {
- input_mt_report_pointer_emulation(data->input_dev, false);
- input_sync(data->input_dev);
- }
+ if (update_input)
+ mxt_input_sync(data->input_dev);
-end:
return IRQ_HANDLED;
}
+static irqreturn_t mxt_interrupt(int irq, void *dev_id)
+{
+ struct mxt_data *data = dev_id;
+
+ if (data->in_bootloader) {
+ /* bootloader state transition completion */
+ complete(&data->bl_completion);
+ return IRQ_HANDLED;
+ }
+
+ return mxt_process_messages_until_invalid(data);
+}
+
+static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
+ u8 value, bool wait)
+{
+ u16 reg;
+ u8 command_register;
+ int timeout_counter = 0;
+ int ret;
+
+ reg = data->T6_address + cmd_offset;
+
+ ret = mxt_write_reg(data->client, reg, value);
+ if (ret)
+ return ret;
+
+ if (!wait)
+ return 0;
+
+ do {
+ msleep(20);
+ ret = __mxt_read_reg(data->client, reg, 1, &command_register);
+ if (ret)
+ return ret;
+ } while (command_register != 0 && timeout_counter++ <= 100);
+
+ if (timeout_counter > 100) {
+ dev_err(&data->client->dev, "Command failed!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mxt_soft_reset(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret = 0;
+
+ dev_info(dev, "Resetting chip\n");
+
+ reinit_completion(&data->reset_completion);
+
+ ret = mxt_t6_command(data, MXT_COMMAND_RESET, MXT_RESET_VALUE, false);
+ if (ret)
+ return ret;
+
+ ret = mxt_wait_for_completion(data, &data->reset_completion,
+ MXT_RESET_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void mxt_update_crc(struct mxt_data *data, u8 cmd, u8 value)
+{
+ /*
+ * On failure, CRC is set to 0 and config will always be
+ * downloaded.
+ */
+ data->config_crc = 0;
+ reinit_completion(&data->crc_completion);
+
+ mxt_t6_command(data, cmd, value, true);
+
+ /*
+ * Wait for crc message. On failure, CRC is set to 0 and config will
+ * always be downloaded.
+ */
+ mxt_wait_for_completion(data, &data->crc_completion, MXT_CRC_TIMEOUT);
+}
+
static int mxt_check_reg_init(struct mxt_data *data)
{
const struct mxt_platform_data *pdata = data->pdata;
@@ -641,13 +880,23 @@ static int mxt_check_reg_init(struct mxt_data *data)
return 0;
}
+ mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
+
+ if (data->config_crc == pdata->config_crc) {
+ dev_info(dev, "Config CRC 0x%06X: OK\n", data->config_crc);
+ return 0;
+ }
+
+ dev_info(dev, "Config CRC 0x%06X: does not match 0x%06X\n",
+ data->config_crc, pdata->config_crc);
+
for (i = 0; i < data->info.object_num; i++) {
object = data->object_table + i;
if (!mxt_object_writable(object->type))
continue;
- size = (object->size + 1) * (object->instances + 1);
+ size = mxt_obj_size(object) * mxt_obj_instances(object);
if (index + size > pdata->config_length) {
dev_err(dev, "Not enough config data!\n");
return -EINVAL;
@@ -660,6 +909,14 @@ static int mxt_check_reg_init(struct mxt_data *data)
index += size;
}
+ mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
+
+ ret = mxt_soft_reset(data);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Config successfully updated\n");
+
return 0;
}
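[Editor's note] mxt_soft_reset() and mxt_update_crc() above share one synchronization pattern: re-arm a struct completion before triggering the chip, then block until mxt_interrupt() signals it or a timeout expires. Arming first matters, because the interrupt can fire before the caller reaches the wait. A generic sketch of the handshake (trigger_hardware_event() is a hypothetical stand-in for mxt_t6_command()):

	/* Sketch of the completion handshake; not a drop-in replacement. */
	reinit_completion(&done);	/* arm first so an early IRQ is not lost */
	trigger_hardware_event();	/* hypothetical stand-in for mxt_t6_command() */
	ret = wait_for_completion_interruptible_timeout(&done,
					msecs_to_jiffies(MXT_RESET_TIMEOUT));
	/*
	 * > 0: completed; 0: timed out; < 0: interrupted.
	 * The IRQ handler calls complete(&done) when it observes the event.
	 */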
@@ -685,54 +942,6 @@ static int mxt_make_highchg(struct mxt_data *data)
return 0;
}
-static void mxt_handle_pdata(struct mxt_data *data)
-{
- const struct mxt_platform_data *pdata = data->pdata;
- u8 voltage;
-
- /* Set touchscreen lines */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_XSIZE,
- pdata->x_line);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_YSIZE,
- pdata->y_line);
-
- /* Set touchscreen orient */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_ORIENT,
- pdata->orient);
-
- /* Set touchscreen burst length */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_BLEN, pdata->blen);
-
- /* Set touchscreen threshold */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_TCHTHR, pdata->threshold);
-
- /* Set touchscreen resolution */
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
- mxt_write_object(data, MXT_TOUCH_MULTI_T9,
- MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
-
- /* Set touchscreen voltage */
- if (pdata->voltage) {
- if (pdata->voltage < MXT_VOLTAGE_DEFAULT) {
- voltage = (MXT_VOLTAGE_DEFAULT - pdata->voltage) /
- MXT_VOLTAGE_STEP;
- voltage = 0xff - voltage + 1;
- } else
- voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
- MXT_VOLTAGE_STEP;
-
- mxt_write_object(data, MXT_SPT_CTECONFIG_T28,
- MXT_CTE_VOLTAGE, voltage);
- }
-}
-
static int mxt_get_info(struct mxt_data *data)
{
struct i2c_client *client = data->client;
@@ -772,7 +981,7 @@ static int mxt_get_object_table(struct mxt_data *data)
if (object->num_report_ids) {
min_id = reportid;
reportid += object->num_report_ids *
- (object->instances + 1);
+ mxt_obj_instances(object);
max_id = reportid - 1;
} else {
min_id = 0;
@@ -780,13 +989,15 @@ static int mxt_get_object_table(struct mxt_data *data)
}
dev_dbg(&data->client->dev,
- "Type %2d Start %3d Size %3d Instances %2d ReportIDs %3u : %3u\n",
- object->type, object->start_address, object->size + 1,
- object->instances + 1, min_id, max_id);
+ "T%u Start:%u Size:%zu Instances:%zu Report IDs:%u-%u\n",
+ object->type, object->start_address,
+ mxt_obj_size(object), mxt_obj_instances(object),
+ min_id, max_id);
switch (object->type) {
case MXT_GEN_COMMAND_T6:
data->T6_reportid = min_id;
+ data->T6_address = object->start_address;
break;
case MXT_TOUCH_MULTI_T9:
data->T9_reportid_min = min_id;
@@ -811,12 +1022,59 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T19_reportid = 0;
}
+static int mxt_read_t9_resolution(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ struct t9_range range;
+ unsigned char orient;
+ struct mxt_object *object;
+
+ object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
+ if (!object)
+ return -EINVAL;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T9_RANGE,
+ sizeof(range), &range);
+ if (error)
+ return error;
+
+ le16_to_cpus(&range.x);
+ le16_to_cpus(&range.y);
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T9_ORIENT,
+ 1, &orient);
+ if (error)
+ return error;
+
+ /* Handle default values */
+ if (range.x == 0)
+ range.x = 1023;
+
+ if (range.y == 0)
+ range.y = 1023;
+
+ if (orient & MXT_T9_ORIENT_SWITCH) {
+ data->max_x = range.y;
+ data->max_y = range.x;
+ } else {
+ data->max_x = range.x;
+ data->max_y = range.y;
+ }
+
+ dev_dbg(&client->dev,
+ "Touchscreen size X%uY%u\n", data->max_x, data->max_y);
+
+ return 0;
+}
+
static int mxt_initialize(struct mxt_data *data)
{
struct i2c_client *client = data->client;
struct mxt_info *info = &data->info;
int error;
- u8 val;
error = mxt_get_info(data);
if (error)
@@ -832,47 +1090,29 @@ static int mxt_initialize(struct mxt_data *data)
/* Get object table information */
error = mxt_get_object_table(data);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Error %d reading object table\n", error);
goto err_free_object_table;
+ }
/* Check register init values */
error = mxt_check_reg_init(data);
- if (error)
- goto err_free_object_table;
-
- mxt_handle_pdata(data);
-
- /* Backup to memory */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_BACKUPNV,
- MXT_BACKUP_VALUE);
- msleep(MXT_BACKUP_TIME);
-
- /* Soft reset */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_RESET, 1);
- msleep(MXT_RESET_TIME);
-
- /* Update matrix size at info struct */
- error = mxt_read_reg(client, MXT_MATRIX_X_SIZE, &val);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Error %d initializing configuration\n",
+ error);
goto err_free_object_table;
- info->matrix_xsize = val;
+ }
- error = mxt_read_reg(client, MXT_MATRIX_Y_SIZE, &val);
- if (error)
+ error = mxt_read_t9_resolution(data);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize T9 resolution\n");
goto err_free_object_table;
- info->matrix_ysize = val;
-
- dev_info(&client->dev,
- "Family ID: %u Variant ID: %u Major.Minor.Build: %u.%u.%02X\n",
- info->family_id, info->variant_id, info->version >> 4,
- info->version & 0xf, info->build);
+ }
dev_info(&client->dev,
- "Matrix X Size: %u Matrix Y Size: %u Object Num: %u\n",
- info->matrix_xsize, info->matrix_ysize,
- info->object_num);
+ "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
+ info->family_id, info->variant_id, info->version >> 4,
+ info->version & 0xf, info->build, info->object_num);
return 0;
@@ -881,20 +1121,6 @@ err_free_object_table:
return error;
}
-static void mxt_calc_resolution(struct mxt_data *data)
-{
- unsigned int max_x = data->pdata->x_size - 1;
- unsigned int max_y = data->pdata->y_size - 1;
-
- if (data->pdata->orient & MXT_XY_SWITCH) {
- data->max_x = max_y;
- data->max_y = max_x;
- } else {
- data->max_x = max_x;
- data->max_y = max_y;
- }
-}
-
/* Firmware Version is returned as Major.Minor.Build */
static ssize_t mxt_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -921,11 +1147,11 @@ static ssize_t mxt_show_instance(char *buf, int count,
{
int i;
- if (object->instances > 0)
+ if (mxt_obj_instances(object) > 1)
count += scnprintf(buf + count, PAGE_SIZE - count,
"Instance %u\n", instance);
- for (i = 0; i < object->size + 1; i++)
+ for (i = 0; i < mxt_obj_size(object); i++)
count += scnprintf(buf + count, PAGE_SIZE - count,
"\t[%2u]: %02x (%d)\n", i, val[i], val[i]);
count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
@@ -958,8 +1184,8 @@ static ssize_t mxt_object_show(struct device *dev,
count += scnprintf(buf + count, PAGE_SIZE - count,
"T%u:\n", object->type);
- for (j = 0; j < object->instances + 1; j++) {
- u16 size = object->size + 1;
+ for (j = 0; j < mxt_obj_instances(object); j++) {
+ u16 size = mxt_obj_size(object);
u16 addr = object->start_address + j * size;
error = __mxt_read_reg(data->client, addr, size, obuf);
@@ -975,13 +1201,38 @@ done:
return error ?: count;
}
+static int mxt_check_firmware_format(struct device *dev,
+ const struct firmware *fw)
+{
+ unsigned int pos = 0;
+ char c;
+
+ while (pos < fw->size) {
+ c = *(fw->data + pos);
+
+ if (c < '0' || (c > '9' && c < 'A') || c > 'F')
+ return 0;
+
+ pos++;
+ }
+
+ /*
+ * To convert file try:
+ * xxd -r -p mXTXXX__APP_VX-X-XX.enc > maxtouch.fw
+ */
+ dev_err(dev, "Aborting: firmware file must be in binary format\n");
+
+ return -EINVAL;
+}
+
static int mxt_load_fw(struct device *dev, const char *fn)
{
struct mxt_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
const struct firmware *fw = NULL;
unsigned int frame_size;
unsigned int pos = 0;
+ unsigned int retry = 0;
+ unsigned int frame = 0;
int ret;
ret = request_firmware(&fw, fn, dev);
@@ -990,59 +1241,91 @@ static int mxt_load_fw(struct device *dev, const char *fn)
return ret;
}
+ /* Check for incorrect enc file */
+ ret = mxt_check_firmware_format(dev, fw);
+ if (ret)
+ goto release_firmware;
+
+ ret = mxt_lookup_bootloader_address(data);
+ if (ret)
+ goto release_firmware;
+
/* Change to the bootloader mode */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_RESET, MXT_BOOT_VALUE);
+ data->in_bootloader = true;
+
+ ret = mxt_t6_command(data, MXT_COMMAND_RESET, MXT_BOOT_VALUE, false);
+ if (ret)
+ goto release_firmware;
+
msleep(MXT_RESET_TIME);
- /* Change to slave address of bootloader */
- if (client->addr == MXT_APP_LOW)
- client->addr = MXT_BOOT_LOW;
- else
- client->addr = MXT_BOOT_HIGH;
+ reinit_completion(&data->bl_completion);
- ret = mxt_check_bootloader(client, MXT_WAITING_BOOTLOAD_CMD);
+ ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD);
if (ret)
- goto out;
+ goto disable_irq;
/* Unlock bootloader */
- mxt_unlock_bootloader(client);
+ mxt_unlock_bootloader(data);
while (pos < fw->size) {
- ret = mxt_check_bootloader(client,
- MXT_WAITING_FRAME_DATA);
+ ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA);
if (ret)
- goto out;
+ goto disable_irq;
frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1));
- /* We should add 2 at frame size as the the firmware data is not
- * included the CRC bytes.
- */
+ /* Take account of CRC bytes */
frame_size += 2;
/* Write one frame to device */
- mxt_fw_write(client, fw->data + pos, frame_size);
-
- ret = mxt_check_bootloader(client,
- MXT_FRAME_CRC_PASS);
+ ret = mxt_bootloader_write(data, fw->data + pos, frame_size);
if (ret)
- goto out;
+ goto disable_irq;
- pos += frame_size;
+ ret = mxt_check_bootloader(data, MXT_FRAME_CRC_PASS);
+ if (ret) {
+ retry++;
- dev_dbg(dev, "Updated %d bytes / %zd bytes\n", pos, fw->size);
+ /* Back off by 20ms per retry */
+ msleep(retry * 20);
+
+ if (retry > 20) {
+ dev_err(dev, "Retry count exceeded\n");
+ goto disable_irq;
+ }
+ } else {
+ retry = 0;
+ pos += frame_size;
+ frame++;
+ }
+
+ if (frame % 50 == 0)
+ dev_dbg(dev, "Sent %d frames, %d/%zd bytes\n",
+ frame, pos, fw->size);
}
-out:
- release_firmware(fw);
+ /* Wait for flash. */
+ ret = mxt_wait_for_completion(data, &data->bl_completion,
+ MXT_FW_RESET_TIME);
+ if (ret)
+ goto disable_irq;
- /* Change to slave address of application */
- if (client->addr == MXT_BOOT_LOW)
- client->addr = MXT_APP_LOW;
- else
- client->addr = MXT_APP_HIGH;
+ dev_dbg(dev, "Sent %d frames, %d bytes\n", frame, pos);
+ /*
+ * Wait for device to reset. Some bootloader versions do not assert
+ * the CHG line after bootloading has finished, so ignore potential
+ * errors.
+ */
+ mxt_wait_for_completion(data, &data->bl_completion, MXT_FW_RESET_TIME);
+
+ data->in_bootloader = false;
+
+disable_irq:
+ disable_irq(data->irq);
+release_firmware:
+ release_firmware(fw);
return ret;
}
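[Editor's note] Each firmware frame starts with a two-byte big-endian length that counts the payload but not the trailing two CRC bytes, which is why the loop adds 2 before writing. As a worked example (illustrative numbers): a header of 0x00 0x40 describes a 64-byte payload, so 66 bytes are sent, and pos only advances by that amount once MXT_FRAME_CRC_PASS confirms the frame; a CRC failure re-sends the same frame after a growing back-off. The decode step, isolated:

	/* Illustrative frame-size decode, matching the loop above: */
	frame_size = (fw->data[pos] << 8) | fw->data[pos + 1];	/* big-endian length */
	frame_size += 2;					/* plus 2 CRC bytes */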
@@ -1053,28 +1336,23 @@ static ssize_t mxt_update_fw_store(struct device *dev,
struct mxt_data *data = dev_get_drvdata(dev);
int error;
- disable_irq(data->irq);
-
error = mxt_load_fw(dev, MXT_FW_NAME);
if (error) {
dev_err(dev, "The firmware update failed(%d)\n", error);
count = error;
} else {
- dev_dbg(dev, "The firmware update succeeded\n");
-
- /* Wait for reset */
- msleep(MXT_FWRESET_TIME);
+ dev_info(dev, "The firmware update succeeded\n");
mxt_free_object_table(data);
mxt_initialize(data);
- }
- enable_irq(data->irq);
+ enable_irq(data->irq);
- error = mxt_make_highchg(data);
- if (error)
- return error;
+ error = mxt_make_highchg(data);
+ if (error)
+ return error;
+ }
return count;
}
@@ -1134,6 +1412,8 @@ static int mxt_probe(struct i2c_client *client,
struct input_dev *input_dev;
int error;
unsigned int num_mt_slots;
+ unsigned int mt_flags = 0;
+ int i;
if (!pdata)
return -EINVAL;
@@ -1146,10 +1426,7 @@ static int mxt_probe(struct i2c_client *client,
goto err_free_mem;
}
- data->is_tp = pdata && pdata->is_tp;
-
- input_dev->name = (data->is_tp) ? "Atmel maXTouch Touchpad" :
- "Atmel maXTouch Touchscreen";
+ input_dev->name = "Atmel maXTouch Touchscreen";
snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0",
client->adapter->nr, client->addr);
@@ -1165,7 +1442,9 @@ static int mxt_probe(struct i2c_client *client,
data->pdata = pdata;
data->irq = client->irq;
- mxt_calc_resolution(data);
+ init_completion(&data->bl_completion);
+ init_completion(&data->reset_completion);
+ init_completion(&data->crc_completion);
error = mxt_initialize(data);
if (error)
@@ -1175,20 +1454,15 @@ static int mxt_probe(struct i2c_client *client,
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
- if (data->is_tp) {
- int i;
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+ if (pdata->t19_num_keys) {
__set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
- for (i = 0; i < MXT_NUM_GPIO; i++)
- if (pdata->key_map[i] != KEY_RESERVED)
- __set_bit(pdata->key_map[i], input_dev->keybit);
+ for (i = 0; i < pdata->t19_num_keys; i++)
+ if (pdata->t19_keymap[i] != KEY_RESERVED)
+ input_set_capability(input_dev, EV_KEY,
+ pdata->t19_keymap[i]);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
- __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
- __set_bit(BTN_TOOL_QUINTTAP, input_dev->keybit);
+ mt_flags |= INPUT_MT_POINTER;
input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
@@ -1196,6 +1470,8 @@ static int mxt_probe(struct i2c_client *client,
MXT_PIXELS_PER_MM);
input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
MXT_PIXELS_PER_MM);
+
+ input_dev->name = "Atmel maXTouch Touchpad";
}
/* For single touch */
@@ -1208,7 +1484,7 @@ static int mxt_probe(struct i2c_client *client,
/* For multi touch */
num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1;
- error = input_mt_init_slots(input_dev, num_mt_slots, 0);
+ error = input_mt_init_slots(input_dev, num_mt_slots, mt_flags);
if (error)
goto err_free_object;
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
@@ -1236,12 +1512,18 @@ static int mxt_probe(struct i2c_client *client,
goto err_free_irq;
error = input_register_device(input_dev);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Error %d registering input device\n",
+ error);
goto err_free_irq;
+ }
error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Failure %d creating sysfs group\n",
+ error);
goto err_unregister_device;
+ }
return 0;
@@ -1294,11 +1576,7 @@ static int mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
- /* Soft reset */
- mxt_write_object(data, MXT_GEN_COMMAND_T6,
- MXT_COMMAND_RESET, 1);
-
- msleep(MXT_RESET_TIME);
+ mxt_soft_reset(data);
mutex_lock(&input_dev->mutex);
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index d3f9f6b0f9b7..7f3c94787787 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -679,7 +679,7 @@ static const struct i2c_device_id auo_pixcir_idtable[] = {
MODULE_DEVICE_TABLE(i2c, auo_pixcir_idtable);
#ifdef CONFIG_OF
-static struct of_device_id auo_pixcir_ts_dt_idtable[] = {
+static const struct of_device_id auo_pixcir_ts_dt_idtable[] = {
{ .compatible = "auo,auo_pixcir_ts" },
{},
};
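[Editor's note] This hunk, and the matching ones for egalax, lpc32xx and mms114 below, all make the same change: OF match tables are never written at runtime, so they belong in read-only memory. The usual shape, sketched with a made-up compatible string:

	/* Illustrative pattern; "vendor,example-ts" is an invented compatible. */
	static const struct of_device_id example_ts_dt_ids[] = {
		{ .compatible = "vendor,example-ts" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, example_ts_dt_ids);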
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 8ccf7bb4028a..cf6f4b31db4d 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -301,10 +301,11 @@ static int da9034_touch_probe(struct platform_device *pdev)
struct da9034_touch_pdata *pdata = dev_get_platdata(&pdev->dev);
struct da9034_touch *touch;
struct input_dev *input_dev;
- int ret;
+ int error;
- touch = kzalloc(sizeof(struct da9034_touch), GFP_KERNEL);
- if (touch == NULL) {
+ touch = devm_kzalloc(&pdev->dev, sizeof(struct da9034_touch),
+ GFP_KERNEL);
+ if (!touch) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
@@ -315,18 +316,18 @@ static int da9034_touch_probe(struct platform_device *pdev)
touch->interval_ms = pdata->interval_ms;
touch->x_inverted = pdata->x_inverted;
touch->y_inverted = pdata->y_inverted;
- } else
+ } else {
/* fallback into default */
touch->interval_ms = 10;
+ }
INIT_DELAYED_WORK(&touch->tsi_work, da9034_tsi_work);
touch->notifier.notifier_call = da9034_touch_notifier;
- input_dev = input_allocate_device();
+ input_dev = devm_input_allocate_device(&pdev->dev);
if (!input_dev) {
dev_err(&pdev->dev, "failed to allocate input device\n");
- ret = -ENOMEM;
- goto err_free_touch;
+ return -ENOMEM;
}
input_dev->name = pdev->name;
@@ -346,26 +347,9 @@ static int da9034_touch_probe(struct platform_device *pdev)
touch->input_dev = input_dev;
input_set_drvdata(input_dev, touch);
- ret = input_register_device(input_dev);
- if (ret)
- goto err_free_input;
-
- platform_set_drvdata(pdev, touch);
- return 0;
-
-err_free_input:
- input_free_device(input_dev);
-err_free_touch:
- kfree(touch);
- return ret;
-}
-
-static int da9034_touch_remove(struct platform_device *pdev)
-{
- struct da9034_touch *touch = platform_get_drvdata(pdev);
-
- input_unregister_device(touch->input_dev);
- kfree(touch);
+ error = input_register_device(input_dev);
+ if (error)
+ return error;
return 0;
}
@@ -376,7 +360,6 @@ static struct platform_driver da9034_touch_driver = {
.owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
- .remove = da9034_touch_remove,
};
module_platform_driver(da9034_touch_driver);
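[Editor's note] The da9034 conversion, like the intel-mid, mcs5000 and pixcir ones below, follows the standard devm_* recipe: memory, the input device and the IRQ are bound to the owning device's lifetime, so the error-unwind labels and the .remove() callback can be deleted outright. A condensed sketch of the resulting probe shape (the foo names are invented):

	/* Hedged sketch of a devm-managed probe; "foo" names are invented. */
	static int foo_probe(struct platform_device *pdev)
	{
		struct foo *priv;
		struct input_dev *input;
		int error;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		input = devm_input_allocate_device(&pdev->dev);
		if (!input)
			return -ENOMEM;

		/* ... set input capabilities here ... */

		error = input_register_device(input);
		if (error)
			return error;	/* everything above is released automatically */

		return 0;	/* no .remove() needed for these resources */
	}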
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index f8815bebc9ef..d4f33992ad8c 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -271,7 +271,7 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
wrbuf[0] = addr;
wrbuf[1] = value;
- return edt_ft5x06_ts_readwrite(tsdata->client, 3,
+ return edt_ft5x06_ts_readwrite(tsdata->client, 2,
wrbuf, 0, NULL);
default:
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index e6bcb13680b2..c8057847d71d 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -262,7 +262,7 @@ static int egalax_ts_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
-static struct of_device_id egalax_ts_dt_ids[] = {
+static const struct of_device_id egalax_ts_dt_ids[] = {
{ .compatible = "eeti,egalax_ts" },
{ /* sentinel */ }
};
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 4f6b156144e9..c38ca4a7e386 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -36,6 +36,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/intel_scu_ipc.h>
+#include <linux/device.h>
/* PMIC Interrupt registers */
#define PMIC_REG_ID1 0x00 /* PMIC ID1 register */
@@ -580,12 +581,17 @@ static int mrstouch_probe(struct platform_device *pdev)
return -EINVAL;
}
- tsdev = kzalloc(sizeof(struct mrstouch_dev), GFP_KERNEL);
- input = input_allocate_device();
- if (!tsdev || !input) {
+ tsdev = devm_kzalloc(&pdev->dev, sizeof(struct mrstouch_dev),
+ GFP_KERNEL);
+ if (!tsdev) {
dev_err(&pdev->dev, "unable to allocate memory\n");
- err = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
+ }
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input) {
+ dev_err(&pdev->dev, "unable to allocate input device\n");
+ return -ENOMEM;
}
tsdev->dev = &pdev->dev;
@@ -598,7 +604,7 @@ static int mrstouch_probe(struct platform_device *pdev)
err = mrstouch_adc_init(tsdev);
if (err) {
dev_err(&pdev->dev, "ADC initialization failed\n");
- goto err_free_mem;
+ return err;
}
input->name = "mrst_touchscreen";
@@ -618,38 +624,20 @@ static int mrstouch_probe(struct platform_device *pdev)
input_set_abs_params(tsdev->input, ABS_PRESSURE,
MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
- err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
- IRQF_ONESHOT, "mrstouch", tsdev);
+ err = devm_request_threaded_irq(&pdev->dev, tsdev->irq, NULL,
+ mrstouch_pendet_irq, IRQF_ONESHOT,
+ "mrstouch", tsdev);
if (err) {
dev_err(tsdev->dev, "unable to allocate irq\n");
- goto err_free_mem;
+ return err;
}
err = input_register_device(tsdev->input);
if (err) {
dev_err(tsdev->dev, "unable to register input device\n");
- goto err_free_irq;
+ return err;
}
- platform_set_drvdata(pdev, tsdev);
- return 0;
-
-err_free_irq:
- free_irq(tsdev->irq, tsdev);
-err_free_mem:
- input_free_device(input);
- kfree(tsdev);
- return err;
-}
-
-static int mrstouch_remove(struct platform_device *pdev)
-{
- struct mrstouch_dev *tsdev = platform_get_drvdata(pdev);
-
- free_irq(tsdev->irq, tsdev);
- input_unregister_device(tsdev->input);
- kfree(tsdev);
-
return 0;
}
@@ -659,7 +647,6 @@ static struct platform_driver mrstouch_driver = {
.owner = THIS_MODULE,
},
.probe = mrstouch_probe,
- .remove = mrstouch_remove,
};
module_platform_driver(mrstouch_driver);
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 2058253b55d9..bb47d3442a35 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -384,7 +384,7 @@ static const struct dev_pm_ops lpc32xx_ts_pm_ops = {
#endif
#ifdef CONFIG_OF
-static struct of_device_id lpc32xx_tsc_of_match[] = {
+static const struct of_device_id lpc32xx_tsc_of_match[] = {
{ .compatible = "nxp,lpc3220-tsc", },
{ },
};
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 647e36f5930e..00510a9836b3 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -161,10 +161,9 @@ static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data)
+static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data,
+ const struct mcs_platform_data *platform_data)
{
- const struct mcs_platform_data *platform_data =
- data->platform_data;
struct i2c_client *client = data->client;
/* Touch reset & sleep mode */
@@ -187,28 +186,32 @@ static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data)
}
static int mcs5000_ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
+ const struct mcs_platform_data *pdata;
struct mcs5000_ts_data *data;
struct input_dev *input_dev;
- int ret;
+ int error;
- if (!dev_get_platdata(&client->dev))
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata)
return -EINVAL;
- data = kzalloc(sizeof(struct mcs5000_ts_data), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!data || !input_dev) {
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
dev_err(&client->dev, "Failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
}
data->client = client;
- data->input_dev = input_dev;
- data->platform_data = dev_get_platdata(&client->dev);
- input_dev->name = "MELPAS MCS-5000 Touchscreen";
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input_dev->name = "MELFAS MCS-5000 Touchscreen";
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
@@ -219,44 +222,30 @@ static int mcs5000_ts_probe(struct i2c_client *client,
input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0);
input_set_drvdata(input_dev, data);
+ data->input_dev = input_dev;
- if (data->platform_data->cfg_pin)
- data->platform_data->cfg_pin();
-
- ret = request_threaded_irq(client->irq, NULL, mcs5000_ts_interrupt,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT, "mcs5000_ts", data);
+ if (pdata->cfg_pin)
+ pdata->cfg_pin();
- if (ret < 0) {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, mcs5000_ts_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "mcs5000_ts", data);
+ if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
}
- ret = input_register_device(data->input_dev);
- if (ret < 0)
- goto err_free_irq;
+ error = input_register_device(data->input_dev);
+ if (error) {
+ dev_err(&client->dev, "Failed to register input device\n");
+ return error;
+ }
- mcs5000_ts_phys_init(data);
+ mcs5000_ts_phys_init(data, pdata);
i2c_set_clientdata(client, data);
return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return ret;
-}
-
-static int mcs5000_ts_remove(struct i2c_client *client)
-{
- struct mcs5000_ts_data *data = i2c_get_clientdata(client);
-
- free_irq(client->irq, data);
- input_unregister_device(data->input_dev);
- kfree(data);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -274,14 +263,15 @@ static int mcs5000_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mcs5000_ts_data *data = i2c_get_clientdata(client);
+ const struct mcs_platform_data *pdata = dev_get_platdata(dev);
- mcs5000_ts_phys_init(data);
+ mcs5000_ts_phys_init(data, pdata);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume);
-#endif
static const struct i2c_device_id mcs5000_ts_id[] = {
{ "mcs5000_ts", 0 },
@@ -291,12 +281,9 @@ MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
static struct i2c_driver mcs5000_ts_driver = {
.probe = mcs5000_ts_probe,
- .remove = mcs5000_ts_remove,
.driver = {
.name = "mcs5000_ts",
-#ifdef CONFIG_PM
.pm = &mcs5000_ts_pm,
-#endif
},
.id_table = mcs5000_ts_id,
};
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 8a598c065391..372bbf7658fe 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -456,7 +456,7 @@ static int mms114_probe(struct i2c_client *client,
data->input_dev = input_dev;
data->pdata = pdata;
- input_dev->name = "MELPAS MMS114 Touchscreen";
+ input_dev->name = "MELFAS MMS114 Touchscreen";
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->open = mms114_input_open;
@@ -570,7 +570,7 @@ static const struct i2c_device_id mms114_id[] = {
MODULE_DEVICE_TABLE(i2c, mms114_id);
#ifdef CONFIG_OF
-static struct of_device_id mms114_dt_match[] = {
+static const struct of_device_id mms114_dt_match[] = {
{ .compatible = "melfas,mms114" },
{ }
};
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
new file mode 100644
index 000000000000..f8f9b84230b1
--- /dev/null
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -0,0 +1,45 @@
+/*
+ * Generic DT helper functions for touchscreen devices
+ *
+ * Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/of.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+
+/**
+ * touchscreen_parse_of_params - parse common touchscreen DT properties
+ * @dev: device that should be parsed
+ *
+ * This function parses common DT properties for touchscreens and sets up the
+ * input device accordingly. The function keeps previously configured default
+ * values if no value is specified via DT.
+ */
+void touchscreen_parse_of_params(struct input_dev *dev)
+{
+ struct device_node *np = dev->dev.parent->of_node;
+ struct input_absinfo *absinfo;
+
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo)
+ return;
+
+ absinfo = &dev->absinfo[ABS_X];
+ of_property_read_u32(np, "touchscreen-size-x", &absinfo->maximum);
+ of_property_read_u32(np, "touchscreen-fuzz-x", &absinfo->fuzz);
+
+ absinfo = &dev->absinfo[ABS_Y];
+ of_property_read_u32(np, "touchscreen-size-y", &absinfo->maximum);
+ of_property_read_u32(np, "touchscreen-fuzz-y", &absinfo->fuzz);
+
+ absinfo = &dev->absinfo[ABS_PRESSURE];
+ of_property_read_u32(np, "touchscreen-max-pressure", &absinfo->maximum);
+ of_property_read_u32(np, "touchscreen-fuzz-pressure", &absinfo->fuzz);
+}
+EXPORT_SYMBOL(touchscreen_parse_of_params);
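[Editor's note] Callers are expected to set their own defaults first and let this helper override them: input_alloc_absinfo() keeps any axis ranges already configured, and of_property_read_u32() leaves its output untouched when a property is absent. A hedged usage sketch (the 4095 defaults are illustrative):

	/* Illustrative call order in a probe routine: defaults first, DT second. */
	input_set_abs_params(input, ABS_X, 0, 4095, 0, 0);	/* driver defaults */
	input_set_abs_params(input, ABS_Y, 0, 4095, 0, 0);
	touchscreen_parse_of_params(input);	/* DT values override, if present */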
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 02392d2061d6..19c6c0fdc94b 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -24,12 +24,13 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/pixcir_ts.h>
+#include <linux/gpio.h>
struct pixcir_i2c_ts_data {
struct i2c_client *client;
struct input_dev *input;
const struct pixcir_ts_platform_data *chip;
- bool exiting;
+ bool running;
};
static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
@@ -87,11 +88,12 @@ static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
{
struct pixcir_i2c_ts_data *tsdata = dev_id;
+ const struct pixcir_ts_platform_data *pdata = tsdata->chip;
- while (!tsdata->exiting) {
+ while (tsdata->running) {
pixcir_ts_poscheck(tsdata);
- if (tsdata->chip->attb_read_val())
+ if (gpio_get_value(pdata->gpio_attb))
break;
msleep(20);
@@ -100,25 +102,221 @@ static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
+ enum pixcir_power_mode mode)
+{
+ struct device *dev = &ts->client->dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_POWER_MODE);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ __func__, PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ ret &= ~PIXCIR_POWER_MODE_MASK;
+ ret |= mode;
+
+ /* Always AUTO_IDLE */
+ ret |= PIXCIR_POWER_ALLOW_IDLE;
+
+ ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_POWER_MODE, ret);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ __func__, PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Set the interrupt mode for the device, i.e. the ATTB line behaviour.
+ *
+ * @polarity : 1 for active high, 0 for active low.
+ */
+static int pixcir_set_int_mode(struct pixcir_i2c_ts_data *ts,
+ enum pixcir_int_mode mode, bool polarity)
+{
+ struct device *dev = &ts->client->dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_INT_MODE);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ __func__, PIXCIR_REG_INT_MODE, ret);
+ return ret;
+ }
+
+ ret &= ~PIXCIR_INT_MODE_MASK;
+ ret |= mode;
+
+ if (polarity)
+ ret |= PIXCIR_INT_POL_HIGH;
+ else
+ ret &= ~PIXCIR_INT_POL_HIGH;
+
+ ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_INT_MODE, ret);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ __func__, PIXCIR_REG_INT_MODE, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Enable/disable interrupt generation
+ */
+static int pixcir_int_enable(struct pixcir_i2c_ts_data *ts, bool enable)
+{
+ struct device *dev = &ts->client->dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_INT_MODE);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ __func__, PIXCIR_REG_INT_MODE, ret);
+ return ret;
+ }
+
+ if (enable)
+ ret |= PIXCIR_INT_ENABLE;
+ else
+ ret &= ~PIXCIR_INT_ENABLE;
+
+ ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_INT_MODE, ret);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ __func__, PIXCIR_REG_INT_MODE, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pixcir_start(struct pixcir_i2c_ts_data *ts)
+{
+ struct device *dev = &ts->client->dev;
+ int error;
+
+ /* LEVEL_TOUCH interrupt with active low polarity */
+ error = pixcir_set_int_mode(ts, PIXCIR_INT_LEVEL_TOUCH, 0);
+ if (error) {
+ dev_err(dev, "Failed to set interrupt mode: %d\n", error);
+ return error;
+ }
+
+ ts->running = true;
+ mb(); /* Update status before IRQ can fire */
+
+ /* enable interrupt generation */
+ error = pixcir_int_enable(ts, true);
+ if (error) {
+ dev_err(dev, "Failed to enable interrupt generation: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int pixcir_stop(struct pixcir_i2c_ts_data *ts)
+{
+ int error;
+
+ /* Disable interrupt generation */
+ error = pixcir_int_enable(ts, false);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to disable interrupt generation: %d\n",
+ error);
+ return error;
+ }
+
+ /* Exit ISR if running, no more report parsing */
+ ts->running = false;
+ mb(); /* update status before we synchronize irq */
+
+ /* Wait till running ISR is complete */
+ synchronize_irq(ts->client->irq);
+
+ return 0;
+}
+
+static int pixcir_input_open(struct input_dev *dev)
+{
+ struct pixcir_i2c_ts_data *ts = input_get_drvdata(dev);
+
+ return pixcir_start(ts);
+}
+
+static void pixcir_input_close(struct input_dev *dev)
+{
+ struct pixcir_i2c_ts_data *ts = input_get_drvdata(dev);
+
+ pixcir_stop(ts);
+}
+
#ifdef CONFIG_PM_SLEEP
static int pixcir_i2c_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct pixcir_i2c_ts_data *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(&client->dev)) {
+ if (!input->users) {
+ ret = pixcir_start(ts);
+ if (ret) {
+ dev_err(dev, "Failed to start\n");
+ goto unlock;
+ }
+ }
- if (device_may_wakeup(&client->dev))
enable_irq_wake(client->irq);
+ } else if (input->users) {
+ ret = pixcir_stop(ts);
+ }
- return 0;
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
}
static int pixcir_i2c_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct pixcir_i2c_ts_data *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
- if (device_may_wakeup(&client->dev))
+ if (device_may_wakeup(&client->dev)) {
disable_irq_wake(client->irq);
- return 0;
+ if (!input->users) {
+ ret = pixcir_stop(ts);
+ if (ret) {
+ dev_err(dev, "Failed to stop\n");
+ goto unlock;
+ }
+ }
+ } else if (input->users) {
+ ret = pixcir_start(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
}
#endif
@@ -130,6 +328,7 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
{
const struct pixcir_ts_platform_data *pdata =
dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
int error;
@@ -139,12 +338,19 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
return -EINVAL;
}
- tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
- input = input_allocate_device();
- if (!tsdata || !input) {
- dev_err(&client->dev, "Failed to allocate driver data!\n");
- error = -ENOMEM;
- goto err_free_mem;
+ if (!gpio_is_valid(pdata->gpio_attb)) {
+ dev_err(dev, "Invalid gpio_attb in pdata\n");
+ return -EINVAL;
+ }
+
+ tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
+ if (!tsdata)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "Failed to allocate input device\n");
+ return -ENOMEM;
}
tsdata->client = client;
@@ -153,6 +359,8 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input->name = client->name;
input->id.bustype = BUS_I2C;
+ input->open = pixcir_input_open;
+ input->close = pixcir_input_close;
input->dev.parent = &client->dev;
__set_bit(EV_KEY, input->evbit);
@@ -165,44 +373,47 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);
- error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, tsdata);
+ error = devm_gpio_request_one(dev, pdata->gpio_attb,
+ GPIOF_DIR_IN, "pixcir_i2c_attb");
if (error) {
- dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err_free_mem;
+ dev_err(dev, "Failed to request ATTB gpio\n");
+ return error;
}
+ error = devm_request_threaded_irq(dev, client->irq, NULL, pixcir_ts_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, tsdata);
+ if (error) {
+ dev_err(dev, "failed to request irq %d\n", client->irq);
+ return error;
+ }
+
+ /* Always be in IDLE mode to save power, device supports auto wake */
+ error = pixcir_set_power_mode(tsdata, PIXCIR_POWER_IDLE);
+ if (error) {
+ dev_err(dev, "Failed to set IDLE mode\n");
+ return error;
+ }
+
+ /* Stop device till opened */
+ error = pixcir_stop(tsdata);
+ if (error)
+ return error;
+
error = input_register_device(input);
if (error)
- goto err_free_irq;
+ return error;
i2c_set_clientdata(client, tsdata);
device_init_wakeup(&client->dev, 1);
return 0;
-
-err_free_irq:
- free_irq(client->irq, tsdata);
-err_free_mem:
- input_free_device(input);
- kfree(tsdata);
- return error;
}
static int pixcir_i2c_ts_remove(struct i2c_client *client)
{
- struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
-
device_init_wakeup(&client->dev, 0);
- tsdata->exiting = true;
- mb();
- free_irq(client->irq, tsdata);
-
- input_unregister_device(tsdata->input);
- kfree(tsdata);
-
return 0;
}
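[Editor's note] The pixcir suspend/resume logic above branches on wakeup capability: a wakeup-enabled device must keep scanning while asleep (it is started if closed, then enable_irq_wake() is called), whereas a non-wakeup device is stopped if open and restarted on resume. What makes stopping safe against the threaded ISR, which loops on tsdata->running, is the flag/barrier/synchronize ordering in pixcir_stop(), isolated here:

	/*
	 * Ordering sketch, mirroring pixcir_stop(): make the ISR's
	 * while (tsdata->running) loop exit before waiting for it.
	 */
	ts->running = false;
	mb();				/* publish the flag to the ISR thread */
	synchronize_irq(ts->client->irq);	/* wait out any in-flight handler */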
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
new file mode 100644
index 000000000000..2ba826024954
--- /dev/null
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -0,0 +1,339 @@
+/*
+ * Allwinner sunxi resistive touchscreen controller driver
+ *
+ * Copyright (C) 2013 - 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * The hwmon parts are based on work by Corentin LABBE which is:
+ * Copyright (C) 2013 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The sun4i-ts controller is capable of detecting a second touch, but when a
+ * second touch is present the accuracy becomes so bad that the reported touch
+ * location is not usable.
+ *
+ * The original Android driver contains some complicated heuristics using the
+ * approximate distance between the 2 touches to see if the user is making a
+ * pinch open / close movement, and then reports emulated multi-touch events
+ * around the last touch coordinate (as the dual-touch coordinates are
+ * worthless).
+ *
+ * These kinds of heuristics are just asking for trouble (and don't belong
+ * in the kernel). So this driver offers straightforward, reliable
+ * single-touch functionality only.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define TP_CTRL0 0x00
+#define TP_CTRL1 0x04
+#define TP_CTRL2 0x08
+#define TP_CTRL3 0x0c
+#define TP_INT_FIFOC 0x10
+#define TP_INT_FIFOS 0x14
+#define TP_TPR 0x18
+#define TP_CDAT 0x1c
+#define TEMP_DATA 0x20
+#define TP_DATA 0x24
+
+/* TP_CTRL0 bits */
+#define ADC_FIRST_DLY(x) ((x) << 24) /* 8 bits */
+#define ADC_FIRST_DLY_MODE(x) ((x) << 23)
+#define ADC_CLK_SEL(x) ((x) << 22)
+#define ADC_CLK_DIV(x) ((x) << 20) /* 3 bits */
+#define FS_DIV(x) ((x) << 16) /* 4 bits */
+#define T_ACQ(x) ((x) << 0) /* 16 bits */
+
+/* TP_CTRL1 bits */
+#define STYLUS_UP_DEBOUN(x) ((x) << 12) /* 8 bits */
+#define STYLUS_UP_DEBOUN_EN(x) ((x) << 9)
+#define TOUCH_PAN_CALI_EN(x) ((x) << 6)
+#define TP_DUAL_EN(x) ((x) << 5)
+#define TP_MODE_EN(x) ((x) << 4)
+#define TP_ADC_SELECT(x) ((x) << 3)
+#define ADC_CHAN_SELECT(x) ((x) << 0) /* 3 bits */
+
+/* TP_CTRL2 bits */
+#define TP_SENSITIVE_ADJUST(x) ((x) << 28) /* 4 bits */
+#define TP_MODE_SELECT(x) ((x) << 26) /* 2 bits */
+#define PRE_MEA_EN(x) ((x) << 24)
+#define PRE_MEA_THRE_CNT(x) ((x) << 0) /* 24 bits */
+
+/* TP_CTRL3 bits */
+#define FILTER_EN(x) ((x) << 2)
+#define FILTER_TYPE(x) ((x) << 0) /* 2 bits */
+
+/* TP_INT_FIFOC irq and fifo mask / control bits */
+#define TEMP_IRQ_EN(x) ((x) << 18)
+#define OVERRUN_IRQ_EN(x) ((x) << 17)
+#define DATA_IRQ_EN(x) ((x) << 16)
+#define TP_DATA_XY_CHANGE(x) ((x) << 13)
+#define FIFO_TRIG(x) ((x) << 8) /* 5 bits */
+#define DATA_DRQ_EN(x) ((x) << 7)
+#define FIFO_FLUSH(x) ((x) << 4)
+#define TP_UP_IRQ_EN(x) ((x) << 1)
+#define TP_DOWN_IRQ_EN(x) ((x) << 0)
+
+/* TP_INT_FIFOS irq and fifo status bits */
+#define TEMP_DATA_PENDING BIT(18)
+#define FIFO_OVERRUN_PENDING BIT(17)
+#define FIFO_DATA_PENDING BIT(16)
+#define TP_IDLE_FLG BIT(2)
+#define TP_UP_PENDING BIT(1)
+#define TP_DOWN_PENDING BIT(0)
+
+/* TP_TPR bits */
+#define TEMP_ENABLE(x) ((x) << 16)
+#define TEMP_PERIOD(x) ((x) << 0) /* t = x * 256 * 16 / clkin */
+
+struct sun4i_ts_data {
+ struct device *dev;
+ struct input_dev *input;
+ void __iomem *base;
+ unsigned int irq;
+ bool ignore_fifo_data;
+ int temp_data;
+};
+
+static void sun4i_ts_irq_handle_input(struct sun4i_ts_data *ts, u32 reg_val)
+{
+ u32 x, y;
+
+ if (reg_val & FIFO_DATA_PENDING) {
+ x = readl(ts->base + TP_DATA);
+ y = readl(ts->base + TP_DATA);
+ /* The 1st location reported after an up event is unreliable */
+ if (!ts->ignore_fifo_data) {
+ input_report_abs(ts->input, ABS_X, x);
+ input_report_abs(ts->input, ABS_Y, y);
+ /*
+ * The hardware has a separate down status bit, but
+ * that gets set before we get the first location,
+ * resulting in reporting a click on the old location.
+ */
+ input_report_key(ts->input, BTN_TOUCH, 1);
+ input_sync(ts->input);
+ } else {
+ ts->ignore_fifo_data = false;
+ }
+ }
+
+ if (reg_val & TP_UP_PENDING) {
+ ts->ignore_fifo_data = true;
+ input_report_key(ts->input, BTN_TOUCH, 0);
+ input_sync(ts->input);
+ }
+}
+
+static irqreturn_t sun4i_ts_irq(int irq, void *dev_id)
+{
+ struct sun4i_ts_data *ts = dev_id;
+ u32 reg_val;
+
+ reg_val = readl(ts->base + TP_INT_FIFOS);
+
+ if (reg_val & TEMP_DATA_PENDING)
+ ts->temp_data = readl(ts->base + TEMP_DATA);
+
+ if (ts->input)
+ sun4i_ts_irq_handle_input(ts, reg_val);
+
+ writel(reg_val, ts->base + TP_INT_FIFOS);
+
+ return IRQ_HANDLED;
+}
+
+static int sun4i_ts_open(struct input_dev *dev)
+{
+ struct sun4i_ts_data *ts = input_get_drvdata(dev);
+
+ /* Flush, set trig level to 1, enable temp, data and up irqs */
+ writel(TEMP_IRQ_EN(1) | DATA_IRQ_EN(1) | FIFO_TRIG(1) | FIFO_FLUSH(1) |
+ TP_UP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
+
+ return 0;
+}
+
+static void sun4i_ts_close(struct input_dev *dev)
+{
+ struct sun4i_ts_data *ts = input_get_drvdata(dev);
+
+ /* Deactivate all input IRQs */
+ writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sun4i_ts_data *ts = dev_get_drvdata(dev);
+
+ /* No temp_data until the first irq */
+ if (ts->temp_data == -1)
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", (ts->temp_data - 1447) * 100);
+}
+
+static ssize_t show_temp_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return sprintf(buf, "SoC temperature\n");
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL);
+
+static struct attribute *sun4i_ts_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp1_label.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(sun4i_ts);
+
+static int sun4i_ts_probe(struct platform_device *pdev)
+{
+ struct sun4i_ts_data *ts;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device *hwmon;
+ int error;
+ bool ts_attached;
+
+ ts = devm_kzalloc(dev, sizeof(struct sun4i_ts_data), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->dev = dev;
+ ts->ignore_fifo_data = true;
+ ts->temp_data = -1;
+
+ ts_attached = of_property_read_bool(np, "allwinner,ts-attached");
+ if (ts_attached) {
+ ts->input = devm_input_allocate_device(dev);
+ if (!ts->input)
+ return -ENOMEM;
+
+ ts->input->name = pdev->name;
+ ts->input->phys = "sun4i_ts/input0";
+ ts->input->open = sun4i_ts_open;
+ ts->input->close = sun4i_ts_close;
+ ts->input->id.bustype = BUS_HOST;
+ ts->input->id.vendor = 0x0001;
+ ts->input->id.product = 0x0001;
+ ts->input->id.version = 0x0100;
+ ts->input->evbit[0] = BIT(EV_SYN) | BIT(EV_KEY) | BIT(EV_ABS);
+ __set_bit(BTN_TOUCH, ts->input->keybit);
+ input_set_abs_params(ts->input, ABS_X, 0, 4095, 0, 0);
+ input_set_abs_params(ts->input, ABS_Y, 0, 4095, 0, 0);
+ input_set_drvdata(ts->input, ts);
+ }
+
+ ts->base = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(ts->base))
+ return PTR_ERR(ts->base);
+
+ ts->irq = platform_get_irq(pdev, 0);
+ error = devm_request_irq(dev, ts->irq, sun4i_ts_irq, 0, "sun4i-ts", ts);
+ if (error)
+ return error;
+
+ /*
+ * Select HOSC clk, clkin = clk / 6, adc samplefreq = clkin / 8192,
+ * t_acq = clkin / (16 * 64)
+ */
+ writel(ADC_CLK_SEL(0) | ADC_CLK_DIV(2) | FS_DIV(7) | T_ACQ(63),
+ ts->base + TP_CTRL0);
+
+ /*
+ * sensitive_adjust = 15 : max, which is not all that sensitive,
+ * tp_mode = 0 : only x and y coordinates, as we don't use dual touch
+ */
+ writel(TP_SENSITIVE_ADJUST(15) | TP_MODE_SELECT(0),
+ ts->base + TP_CTRL2);
+
+ /* Enable median filter, type 1 : 5/3 */
+ writel(FILTER_EN(1) | FILTER_TYPE(1), ts->base + TP_CTRL3);
+
+ /* Enable temperature measurement, period 1953 (2 seconds) */
+ writel(TEMP_ENABLE(1) | TEMP_PERIOD(1953), ts->base + TP_TPR);
+
+ /*
+	 * Set stylus up debounce to approx. 10 ms, enable debounce, and
+ * finally enable tp mode.
+ */
+ writel(STYLUS_UP_DEBOUN(5) | STYLUS_UP_DEBOUN_EN(1) | TP_MODE_EN(1),
+ ts->base + TP_CTRL1);
+
+ hwmon = devm_hwmon_device_register_with_groups(ts->dev, "sun4i_ts",
+ ts, sun4i_ts_groups);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
+
+ if (ts_attached) {
+ error = input_register_device(ts->input);
+ if (error) {
+ writel(0, ts->base + TP_INT_FIFOC);
+ return error;
+ }
+ }
+
+ platform_set_drvdata(pdev, ts);
+ return 0;
+}
+
+static int sun4i_ts_remove(struct platform_device *pdev)
+{
+ struct sun4i_ts_data *ts = platform_get_drvdata(pdev);
+
+	/* Explicit unregister to avoid open/close changing the IRQ mask later */
+ if (ts->input)
+ input_unregister_device(ts->input);
+
+ /* Deactivate all IRQs */
+ writel(0, ts->base + TP_INT_FIFOC);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_ts_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-ts", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_ts_of_match);
+
+static struct platform_driver sun4i_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "sun4i-ts",
+ .of_match_table = of_match_ptr(sun4i_ts_of_match),
+ },
+ .probe = sun4i_ts_probe,
+ .remove = sun4i_ts_remove,
+};
+
+module_platform_driver(sun4i_ts_driver);
+
+MODULE_DESCRIPTION("Allwinner sun4i resistive touchscreen controller driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
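
A note on the magic numbers in the probe path above: assuming the HOSC parent runs at 24 MHz (an assumption, not stated in this patch), ADC_CLK_DIV(2) selects clk / 6, so clkin = 4 MHz; TEMP_PERIOD(1953) then gives t = 1953 * 256 * 16 / 4,000,000, which is roughly 2.0 s, matching the comment in probe, and show_temp() scales the raw reading to millidegrees Celsius. A standalone sketch of the same arithmetic (the sample value is hypothetical):

#include <stdio.h>

int main(void)
{
	const long hosc_hz = 24000000;	/* assumed HOSC parent rate */
	const long clkin = hosc_hz / 6;	/* ADC_CLK_DIV(2) => clk / 6 */
	const long period = 1953;	/* TEMP_PERIOD value from probe */

	/* t = x * 256 * 16 / clkin, per the TP_TPR register comment */
	double t = (double)period * 256 * 16 / clkin;

	int temp_data = 1647;		/* hypothetical raw ADC sample */
	int millicelsius = (temp_data - 1447) * 100;	/* as in show_temp() */

	printf("sample period %.2f s, temperature %d mC\n", t, millicelsius);
	return 0;
}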
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 550adcbbfc23..52380b68ebdf 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -25,11 +25,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/tsc2005.h>
+#include <linux/regulator/consumer.h>
/*
* The touchscreen interface operates as follows:
@@ -100,6 +104,11 @@
TSC2005_CFR2_AVG_7)
#define MAX_12BIT 0xfff
+#define TSC2005_DEF_X_FUZZ 4
+#define TSC2005_DEF_Y_FUZZ 8
+#define TSC2005_DEF_P_FUZZ 2
+#define TSC2005_DEF_RESISTOR 280
+
#define TSC2005_SPI_MAX_SPEED_HZ 10000000
#define TSC2005_PENUP_TIME_MS 40
@@ -143,6 +152,9 @@ struct tsc2005 {
bool pen_down;
+ struct regulator *vio;
+
+ int reset_gpio;
void (*set_reset)(bool enable);
};
@@ -337,6 +349,14 @@ static void tsc2005_stop_scan(struct tsc2005 *ts)
tsc2005_cmd(ts, TSC2005_CMD_STOP);
}
+static void tsc2005_set_reset(struct tsc2005 *ts, bool enable)
+{
+ if (ts->reset_gpio >= 0)
+ gpio_set_value(ts->reset_gpio, enable);
+ else if (ts->set_reset)
+ ts->set_reset(enable);
+}
+
/* must be called with ts->mutex held */
static void __tsc2005_disable(struct tsc2005 *ts)
{
@@ -355,7 +375,7 @@ static void __tsc2005_enable(struct tsc2005 *ts)
{
tsc2005_start_scan(ts);
- if (ts->esd_timeout && ts->set_reset) {
+	if (ts->esd_timeout && (ts->set_reset || ts->reset_gpio >= 0)) {
ts->last_valid_interrupt = jiffies;
schedule_delayed_work(&ts->esd_work,
round_jiffies_relative(
@@ -414,9 +434,9 @@ static ssize_t tsc2005_selftest_show(struct device *dev,
}
/* hardware reset */
- ts->set_reset(false);
+ tsc2005_set_reset(ts, false);
usleep_range(100, 500); /* only 10us required */
- ts->set_reset(true);
+ tsc2005_set_reset(ts, true);
if (!success)
goto out;
@@ -459,7 +479,7 @@ static umode_t tsc2005_attr_is_visible(struct kobject *kobj,
umode_t mode = attr->mode;
if (attr == &dev_attr_selftest.attr) {
- if (!ts->set_reset)
+		if (!ts->set_reset && ts->reset_gpio < 0)
mode = 0;
}
@@ -509,9 +529,9 @@ static void tsc2005_esd_work(struct work_struct *work)
tsc2005_update_pen_state(ts, 0, 0, 0);
- ts->set_reset(false);
+ tsc2005_set_reset(ts, false);
usleep_range(100, 500); /* only 10us required */
- ts->set_reset(true);
+ tsc2005_set_reset(ts, true);
enable_irq(ts->spi->irq);
tsc2005_start_scan(ts);
@@ -572,29 +592,47 @@ static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
static int tsc2005_probe(struct spi_device *spi)
{
const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct device_node *np = spi->dev.of_node;
+
struct tsc2005 *ts;
struct input_dev *input_dev;
- unsigned int max_x, max_y, max_p;
- unsigned int fudge_x, fudge_y, fudge_p;
+ unsigned int max_x = MAX_12BIT;
+ unsigned int max_y = MAX_12BIT;
+ unsigned int max_p = MAX_12BIT;
+ unsigned int fudge_x = TSC2005_DEF_X_FUZZ;
+ unsigned int fudge_y = TSC2005_DEF_Y_FUZZ;
+ unsigned int fudge_p = TSC2005_DEF_P_FUZZ;
+ unsigned int x_plate_ohm = TSC2005_DEF_RESISTOR;
+ unsigned int esd_timeout;
int error;
- if (!pdata) {
- dev_dbg(&spi->dev, "no platform data\n");
+ if (!np && !pdata) {
+ dev_err(&spi->dev, "no platform data\n");
return -ENODEV;
}
- fudge_x = pdata->ts_x_fudge ? : 4;
- fudge_y = pdata->ts_y_fudge ? : 8;
- fudge_p = pdata->ts_pressure_fudge ? : 2;
- max_x = pdata->ts_x_max ? : MAX_12BIT;
- max_y = pdata->ts_y_max ? : MAX_12BIT;
- max_p = pdata->ts_pressure_max ? : MAX_12BIT;
-
if (spi->irq <= 0) {
- dev_dbg(&spi->dev, "no irq\n");
+ dev_err(&spi->dev, "no irq\n");
return -ENODEV;
}
+ if (pdata) {
+ fudge_x = pdata->ts_x_fudge;
+ fudge_y = pdata->ts_y_fudge;
+ fudge_p = pdata->ts_pressure_fudge;
+ max_x = pdata->ts_x_max;
+ max_y = pdata->ts_y_max;
+ max_p = pdata->ts_pressure_max;
+ x_plate_ohm = pdata->ts_x_plate_ohm;
+ esd_timeout = pdata->esd_timeout_ms;
+ } else {
+ x_plate_ohm = TSC2005_DEF_RESISTOR;
+ of_property_read_u32(np, "ti,x-plate-ohms", &x_plate_ohm);
+ esd_timeout = 0;
+ of_property_read_u32(np, "ti,esd-recovery-timeout-ms",
+ &esd_timeout);
+ }
+
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
if (!spi->max_speed_hz)
@@ -604,19 +642,48 @@ static int tsc2005_probe(struct spi_device *spi)
if (error)
return error;
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ ts = devm_kzalloc(&spi->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&spi->dev);
+ if (!input_dev)
+ return -ENOMEM;
ts->spi = spi;
ts->idev = input_dev;
- ts->x_plate_ohm = pdata->ts_x_plate_ohm ? : 280;
- ts->esd_timeout = pdata->esd_timeout_ms;
- ts->set_reset = pdata->set_reset;
+ ts->x_plate_ohm = x_plate_ohm;
+ ts->esd_timeout = esd_timeout;
+
+ if (np) {
+ ts->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+ if (ts->reset_gpio == -EPROBE_DEFER)
+ return ts->reset_gpio;
+ if (ts->reset_gpio < 0) {
+ dev_err(&spi->dev, "error acquiring reset gpio: %d\n",
+ ts->reset_gpio);
+ return ts->reset_gpio;
+ }
+
+ error = devm_gpio_request_one(&spi->dev, ts->reset_gpio, 0,
+ "reset-gpios");
+ if (error) {
+ dev_err(&spi->dev, "error requesting reset gpio: %d\n",
+ error);
+ return error;
+ }
+
+ ts->vio = devm_regulator_get(&spi->dev, "vio");
+ if (IS_ERR(ts->vio)) {
+ error = PTR_ERR(ts->vio);
+ dev_err(&spi->dev, "vio regulator missing (%d)", error);
+ return error;
+ }
+ } else {
+ ts->reset_gpio = -1;
+ ts->set_reset = pdata->set_reset;
+ }
mutex_init(&ts->mutex);
@@ -641,6 +708,9 @@ static int tsc2005_probe(struct spi_device *spi)
input_set_abs_params(input_dev, ABS_Y, 0, max_y, fudge_y, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
+ if (np)
+ touchscreen_parse_of_params(input_dev);
+
input_dev->open = tsc2005_open;
input_dev->close = tsc2005_close;
@@ -649,12 +719,20 @@ static int tsc2005_probe(struct spi_device *spi)
/* Ensure the touchscreen is off */
tsc2005_stop_scan(ts);
- error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "tsc2005", ts);
+ error = devm_request_threaded_irq(&spi->dev, spi->irq, NULL,
+ tsc2005_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "tsc2005", ts);
if (error) {
dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
- goto err_free_mem;
+ return error;
+ }
+
+ /* enable regulator for DT */
+ if (ts->vio) {
+ error = regulator_enable(ts->vio);
+ if (error)
+ return error;
}
spi_set_drvdata(spi, ts);
@@ -662,7 +740,7 @@ static int tsc2005_probe(struct spi_device *spi)
if (error) {
dev_err(&spi->dev,
"Failed to create sysfs attributes, err: %d\n", error);
- goto err_clear_drvdata;
+ goto disable_regulator;
}
error = input_register_device(ts->idev);
@@ -677,11 +755,9 @@ static int tsc2005_probe(struct spi_device *spi)
err_remove_sysfs:
sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
-err_clear_drvdata:
- free_irq(spi->irq, ts);
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
+disable_regulator:
+ if (ts->vio)
+ regulator_disable(ts->vio);
return error;
}
@@ -689,11 +765,10 @@ static int tsc2005_remove(struct spi_device *spi)
{
struct tsc2005 *ts = spi_get_drvdata(spi);
- sysfs_remove_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
+ sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
- free_irq(ts->spi->irq, ts);
- input_unregister_device(ts->idev);
- kfree(ts);
+ if (ts->vio)
+ regulator_disable(ts->vio);
return 0;
}
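
One idiom worth noting in the probe rework above: of_property_read_u32() leaves its output variable untouched when the property is missing, so seeding the variable with a default and then calling it unconditionally implements "DT value if present, default otherwise" without an explicit error check. A minimal sketch (the helper name is illustrative, not part of the driver):

/* Sketch: optional DT property with a fallback default. */
static u32 tsc2005_x_plate_ohms(struct device_node *np)
{
	u32 val = TSC2005_DEF_RESISTOR;	/* default: 280 ohms */

	/* Overwrites val only when "ti,x-plate-ohms" exists and parses. */
	of_property_read_u32(np, "ti,x-plate-ohms", &val);
	return val;
}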
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 01d30cedde46..feea85b52fa8 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -880,7 +880,7 @@ static struct i2c_device_id zforce_idtable[] = {
MODULE_DEVICE_TABLE(i2c, zforce_idtable);
#ifdef CONFIG_OF
-static struct of_device_id zforce_dt_idtable[] = {
+static const struct of_device_id zforce_dt_idtable[] = {
{ .compatible = "neonode,zforce" },
{},
};
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d4daa05efe60..499b4366a98d 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,7 +45,7 @@ struct pri_queue {
struct pasid_state {
struct list_head list; /* For global state-list */
atomic_t count; /* Reference count */
- atomic_t mmu_notifier_count; /* Counting nested mmu_notifier
+ unsigned mmu_notifier_count; /* Counting nested mmu_notifier
calls */
struct task_struct *task; /* Task bound to this PASID */
struct mm_struct *mm; /* mm_struct for the faults */
@@ -53,7 +53,8 @@ struct pasid_state {
struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
struct device_state *device_state; /* Link to our device_state */
int pasid; /* PASID index */
- spinlock_t lock; /* Protect pri_queues */
+ spinlock_t lock; /* Protect pri_queues and
+					   mmu_notifier_count */
wait_queue_head_t wq; /* To wait for count == 0 */
};
@@ -431,15 +432,19 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
+ unsigned long flags;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ if (pasid_state->mmu_notifier_count == 0) {
amd_iommu_domain_set_gcr3(dev_state->domain,
pasid_state->pasid,
__pa(empty_page_table));
}
+ pasid_state->mmu_notifier_count += 1;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -448,15 +453,19 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
+ unsigned long flags;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ pasid_state->mmu_notifier_count -= 1;
+ if (pasid_state->mmu_notifier_count == 0) {
amd_iommu_domain_set_gcr3(dev_state->domain,
pasid_state->pasid,
__pa(pasid_state->mm->pgd));
}
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -650,7 +659,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
goto out;
atomic_set(&pasid_state->count, 1);
- atomic_set(&pasid_state->mmu_notifier_count, 0);
init_waitqueue_head(&pasid_state->wq);
spin_lock_init(&pasid_state->lock);
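
The counter change above deserves a sentence of rationale: atomic_add_return() only made the increment itself atomic, while the real invariant is that the zero-crossing test and the gcr3 update it guards must execute together, so the pair now sits under pasid_state->lock. A minimal sketch of the pattern, with illustrative names (the two actions stand in for the amd_iommu_domain_set_gcr3() calls):

#include <linux/spinlock.h>

struct nest_state {
	spinlock_t lock;		/* protects nest_count and the guarded action */
	unsigned int nest_count;	/* nested invalidate_range depth */
};

static void first_enter_action(struct nest_state *s) { /* e.g. install empty page table */ }
static void last_leave_action(struct nest_state *s) { /* e.g. restore mm page table */ }

static void range_start(struct nest_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (s->nest_count++ == 0)
		first_enter_action(s);	/* test and action in one critical section */
	spin_unlock_irqrestore(&s->lock, flags);
}

static void range_end(struct nest_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (--s->nest_count == 0)
		last_leave_action(s);
	spin_unlock_irqrestore(&s->lock, flags);
}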
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6bb32773c3ac..51b6b77dc3e5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3816,14 +3816,11 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
((void *)rmrr) + rmrr->header.length,
rmrr->segment, rmrru->devices,
rmrru->devices_cnt);
- if (ret > 0)
- break;
- else if(ret < 0)
+			if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
- if (dmar_remove_dev_scope(info, rmrr->segment,
- rmrru->devices, rmrru->devices_cnt))
- break;
+ dmar_remove_dev_scope(info, rmrr->segment,
+ rmrru->devices, rmrru->devices_cnt);
}
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index c887e6eebc41..574aba0eba4e 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -334,6 +334,15 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
static void armada_xp_mpic_smp_cpu_init(void)
{
+ u32 control;
+ int nr_irqs, i;
+
+ control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+ nr_irqs = (control >> 2) & 0x3ff;
+
+ for (i = 0; i < nr_irqs; i++)
+ writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+
/* Clear pending IPIs */
writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
@@ -474,7 +483,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource main_int_res, per_cpu_int_res;
- int parent_irq;
+ int parent_irq, nr_irqs, i;
u32 control;
BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -496,9 +505,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!per_cpu_int_base);
control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+ nr_irqs = (control >> 2) & 0x3ff;
+
+ for (i = 0; i < nr_irqs; i++)
+ writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
armada_370_xp_mpic_domain =
- irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+ irq_domain_add_linear(node, nr_irqs,
&armada_370_xp_mpic_irq_ops, NULL);
BUG_ON(!armada_370_xp_mpic_domain);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 8ee2a36d5840..c15c840987d2 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -150,7 +150,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* Allocate a single Generic IRQ chip for this node */
ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
- np->full_name, handle_level_irq, clr, 0, 0);
+ np->full_name, handle_edge_irq, clr, 0, 0);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 3fdda3a40269..6ce6bd3441bf 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
};
static struct spear_shirq spear320_shirq_ras3 = {
- .irq_nr = 3,
+ .irq_nr = 7,
.irq_bit_off = 0,
.invalid_irq = 1,
.regs = {
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 9816c51eb5c2..7641b3096ea6 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -1,11 +1,3 @@
-config ISDN_DRV_AVMB1_VERBOSE_REASON
- bool "Verbose reason code reporting"
- default y
- help
- If you say Y here, the CAPI drivers will give verbose reasons for
- disconnecting. This will increase the size of the kernel by 7 KB. If
- unsure, say Y.
-
config CAPI_TRACE
bool "CAPI trace support"
default y
@@ -17,7 +9,7 @@ config CAPI_TRACE
If unsure, say Y.
config ISDN_CAPI_CAPI20
- tristate "CAPI2.0 /dev/capi support"
+ tristate "CAPI2.0 /dev/capi20 support"
help
This option will provide the CAPI 2.0 interface to userspace
applications via /dev/capi20. Applications should use the
@@ -42,3 +34,11 @@ config ISDN_CAPI_CAPIDRV
the legacy isdn4linux link layer. If you have a card which is
supported by a CAPI driver, but still want to use old features like
ippp interfaces or ttyI emulation, say Y/M here.
+
+config ISDN_CAPI_CAPIDRV_VERBOSE
+ bool "Verbose reason code reporting"
+ depends on ISDN_CAPI_CAPIDRV
+ help
+ If you say Y here, the capidrv interface will give verbose reasons
+ for disconnecting. This will increase the size of the kernel by 7 KB.
+ If unsure, say N.
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ac6f72b455d1..f9a87ed2392b 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1271,7 +1271,7 @@ static int __init capinc_tty_init(void)
return -ENOMEM;
}
drv->driver_name = "capi_nc";
- drv->name = "capi";
+ drv->name = "capi!";
drv->major = 0;
drv->minor_start = 0;
drv->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1417,7 +1417,7 @@ static int __init capi_init(void)
return PTR_ERR(capi_class);
}
- device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi");
+ device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
if (capinc_tty_init() < 0) {
device_destroy(capi_class, MKDEV(capi_major, 0));
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index cc9f1927a322..fd6d28f3fc36 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -763,6 +763,201 @@ static inline int new_bchan(capidrv_contr *card)
}
/* ------------------------------------------------------------------- */
+static char *capi_info2str(u16 reason)
+{
+#ifndef CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE
+ return "..";
+#else
+ switch (reason) {
+
+/*-- informative values (corresponding message was processed) -----*/
+ case 0x0001:
+ return "NCPI not supported by current protocol, NCPI ignored";
+ case 0x0002:
+ return "Flags not supported by current protocol, flags ignored";
+ case 0x0003:
+ return "Alert already sent by another application";
+
+/*-- error information concerning CAPI_REGISTER -----*/
+ case 0x1001:
+ return "Too many applications";
+ case 0x1002:
+ return "Logical block size too small, must be at least 128 Bytes";
+ case 0x1003:
+ return "Buffer exceeds 64 kByte";
+ case 0x1004:
+ return "Message buffer size too small, must be at least 1024 Bytes";
+ case 0x1005:
+ return "Max. number of logical connections not supported";
+ case 0x1006:
+ return "Reserved";
+ case 0x1007:
+ return "The message could not be accepted because of an internal busy condition";
+ case 0x1008:
+ return "OS resource error (no memory ?)";
+ case 0x1009:
+ return "CAPI not installed";
+ case 0x100A:
+ return "Controller does not support external equipment";
+ case 0x100B:
+ return "Controller does only support external equipment";
+
+/*-- error information concerning message exchange functions -----*/
+ case 0x1101:
+ return "Illegal application number";
+ case 0x1102:
+ return "Illegal command or subcommand or message length less than 12 bytes";
+ case 0x1103:
+ return "The message could not be accepted because of a queue full condition !! The error code does not imply that CAPI cannot receive messages directed to another controller, PLCI or NCCI";
+ case 0x1104:
+ return "Queue is empty";
+ case 0x1105:
+ return "Queue overflow, a message was lost !! This indicates a configuration error. The only recovery from this error is to perform a CAPI_RELEASE";
+ case 0x1106:
+ return "Unknown notification parameter";
+ case 0x1107:
+ return "The Message could not be accepted because of an internal busy condition";
+ case 0x1108:
+ return "OS Resource error (no memory ?)";
+ case 0x1109:
+ return "CAPI not installed";
+ case 0x110A:
+ return "Controller does not support external equipment";
+ case 0x110B:
+ return "Controller does only support external equipment";
+
+/*-- error information concerning resource / coding problems -----*/
+ case 0x2001:
+ return "Message not supported in current state";
+ case 0x2002:
+ return "Illegal Controller / PLCI / NCCI";
+ case 0x2003:
+ return "Out of PLCI";
+ case 0x2004:
+ return "Out of NCCI";
+ case 0x2005:
+ return "Out of LISTEN";
+ case 0x2006:
+ return "Out of FAX resources (protocol T.30)";
+ case 0x2007:
+ return "Illegal message parameter coding";
+
+/*-- error information concerning requested services -----*/
+ case 0x3001:
+ return "B1 protocol not supported";
+ case 0x3002:
+ return "B2 protocol not supported";
+ case 0x3003:
+ return "B3 protocol not supported";
+ case 0x3004:
+ return "B1 protocol parameter not supported";
+ case 0x3005:
+ return "B2 protocol parameter not supported";
+ case 0x3006:
+ return "B3 protocol parameter not supported";
+ case 0x3007:
+ return "B protocol combination not supported";
+ case 0x3008:
+ return "NCPI not supported";
+ case 0x3009:
+ return "CIP Value unknown";
+ case 0x300A:
+ return "Flags not supported (reserved bits)";
+ case 0x300B:
+ return "Facility not supported";
+ case 0x300C:
+ return "Data length not supported by current protocol";
+ case 0x300D:
+ return "Reset procedure not supported by current protocol";
+
+/*-- information about the clearing of a physical connection -----*/
+ case 0x3301:
+ return "Protocol error layer 1 (broken line or B-channel removed by signalling protocol)";
+ case 0x3302:
+ return "Protocol error layer 2";
+ case 0x3303:
+ return "Protocol error layer 3";
+ case 0x3304:
+ return "Another application got that call";
+/*-- T.30 specific reasons -----*/
+ case 0x3311:
+ return "Connecting not successful (remote station is no FAX G3 machine)";
+ case 0x3312:
+ return "Connecting not successful (training error)";
+ case 0x3313:
+ return "Disconnected before transfer (remote station does not support transfer mode, e.g. resolution)";
+ case 0x3314:
+ return "Disconnected during transfer (remote abort)";
+ case 0x3315:
+ return "Disconnected during transfer (remote procedure error, e.g. unsuccessful repetition of T.30 commands)";
+ case 0x3316:
+ return "Disconnected during transfer (local tx data underrun)";
+ case 0x3317:
+ return "Disconnected during transfer (local rx data overflow)";
+ case 0x3318:
+ return "Disconnected during transfer (local abort)";
+ case 0x3319:
+ return "Illegal parameter coding (e.g. SFF coding error)";
+
+/*-- disconnect causes from the network according to ETS 300 102-1/Q.931 -----*/
+ case 0x3481: return "Unallocated (unassigned) number";
+ case 0x3482: return "No route to specified transit network";
+ case 0x3483: return "No route to destination";
+ case 0x3486: return "Channel unacceptable";
+ case 0x3487:
+ return "Call awarded and being delivered in an established channel";
+ case 0x3490: return "Normal call clearing";
+ case 0x3491: return "User busy";
+ case 0x3492: return "No user responding";
+ case 0x3493: return "No answer from user (user alerted)";
+ case 0x3495: return "Call rejected";
+ case 0x3496: return "Number changed";
+ case 0x349A: return "Non-selected user clearing";
+ case 0x349B: return "Destination out of order";
+ case 0x349C: return "Invalid number format";
+ case 0x349D: return "Facility rejected";
+ case 0x349E: return "Response to STATUS ENQUIRY";
+ case 0x349F: return "Normal, unspecified";
+ case 0x34A2: return "No circuit / channel available";
+ case 0x34A6: return "Network out of order";
+ case 0x34A9: return "Temporary failure";
+ case 0x34AA: return "Switching equipment congestion";
+ case 0x34AB: return "Access information discarded";
+ case 0x34AC: return "Requested circuit / channel not available";
+ case 0x34AF: return "Resources unavailable, unspecified";
+ case 0x34B1: return "Quality of service unavailable";
+ case 0x34B2: return "Requested facility not subscribed";
+ case 0x34B9: return "Bearer capability not authorized";
+ case 0x34BA: return "Bearer capability not presently available";
+ case 0x34BF: return "Service or option not available, unspecified";
+ case 0x34C1: return "Bearer capability not implemented";
+ case 0x34C2: return "Channel type not implemented";
+ case 0x34C5: return "Requested facility not implemented";
+ case 0x34C6: return "Only restricted digital information bearer capability is available";
+ case 0x34CF: return "Service or option not implemented, unspecified";
+ case 0x34D1: return "Invalid call reference value";
+ case 0x34D2: return "Identified channel does not exist";
+ case 0x34D3: return "A suspended call exists, but this call identity does not";
+ case 0x34D4: return "Call identity in use";
+ case 0x34D5: return "No call suspended";
+ case 0x34D6: return "Call having the requested call identity has been cleared";
+ case 0x34D8: return "Incompatible destination";
+ case 0x34DB: return "Invalid transit network selection";
+ case 0x34DF: return "Invalid message, unspecified";
+ case 0x34E0: return "Mandatory information element is missing";
+ case 0x34E1: return "Message type non-existent or not implemented";
+ case 0x34E2: return "Message not compatible with call state or message type non-existent or not implemented";
+ case 0x34E3: return "Information element non-existent or not implemented";
+ case 0x34E4: return "Invalid information element contents";
+ case 0x34E5: return "Message not compatible with call state";
+ case 0x34E6: return "Recovery on timer expiry";
+ case 0x34EF: return "Protocol error, unspecified";
+ case 0x34FF: return "Interworking, unspecified";
+
+ default: return "No additional information";
+ }
+#endif
+}
static void handle_controller(_cmsg *cmsg)
{
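
With capi_info2str() now private to capidrv.c, and its string table compiled out unless ISDN_CAPI_CAPIDRV_VERBOSE is set, the only expected callers are this file's own logging paths. Roughly, a call site would look like the following sketch (the format string is illustrative):

	printk(KERN_INFO "capidrv: disconnect reason 0x%04x (%s)\n",
	       reason, capi_info2str(reason));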
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index d26f17033b68..6e797e502cfa 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -22,205 +22,6 @@
/* from CAPI2.0 DDK AVM Berlin GmbH */
-#ifndef CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON
-char *capi_info2str(u16 reason)
-{
- return "..";
-}
-#else
-char *capi_info2str(u16 reason)
-{
- switch (reason) {
-
-/*-- informative values (corresponding message was processed) -----*/
- case 0x0001:
- return "NCPI not supported by current protocol, NCPI ignored";
- case 0x0002:
- return "Flags not supported by current protocol, flags ignored";
- case 0x0003:
- return "Alert already sent by another application";
-
-/*-- error information concerning CAPI_REGISTER -----*/
- case 0x1001:
- return "Too many applications";
- case 0x1002:
- return "Logical block size too small, must be at least 128 Bytes";
- case 0x1003:
- return "Buffer exceeds 64 kByte";
- case 0x1004:
- return "Message buffer size too small, must be at least 1024 Bytes";
- case 0x1005:
- return "Max. number of logical connections not supported";
- case 0x1006:
- return "Reserved";
- case 0x1007:
- return "The message could not be accepted because of an internal busy condition";
- case 0x1008:
- return "OS resource error (no memory ?)";
- case 0x1009:
- return "CAPI not installed";
- case 0x100A:
- return "Controller does not support external equipment";
- case 0x100B:
- return "Controller does only support external equipment";
-
-/*-- error information concerning message exchange functions -----*/
- case 0x1101:
- return "Illegal application number";
- case 0x1102:
- return "Illegal command or subcommand or message length less than 12 bytes";
- case 0x1103:
- return "The message could not be accepted because of a queue full condition !! The error code does not imply that CAPI cannot receive messages directed to another controller, PLCI or NCCI";
- case 0x1104:
- return "Queue is empty";
- case 0x1105:
- return "Queue overflow, a message was lost !! This indicates a configuration error. The only recovery from this error is to perform a CAPI_RELEASE";
- case 0x1106:
- return "Unknown notification parameter";
- case 0x1107:
- return "The Message could not be accepted because of an internal busy condition";
- case 0x1108:
- return "OS Resource error (no memory ?)";
- case 0x1109:
- return "CAPI not installed";
- case 0x110A:
- return "Controller does not support external equipment";
- case 0x110B:
- return "Controller does only support external equipment";
-
-/*-- error information concerning resource / coding problems -----*/
- case 0x2001:
- return "Message not supported in current state";
- case 0x2002:
- return "Illegal Controller / PLCI / NCCI";
- case 0x2003:
- return "Out of PLCI";
- case 0x2004:
- return "Out of NCCI";
- case 0x2005:
- return "Out of LISTEN";
- case 0x2006:
- return "Out of FAX resources (protocol T.30)";
- case 0x2007:
- return "Illegal message parameter coding";
-
-/*-- error information concerning requested services -----*/
- case 0x3001:
- return "B1 protocol not supported";
- case 0x3002:
- return "B2 protocol not supported";
- case 0x3003:
- return "B3 protocol not supported";
- case 0x3004:
- return "B1 protocol parameter not supported";
- case 0x3005:
- return "B2 protocol parameter not supported";
- case 0x3006:
- return "B3 protocol parameter not supported";
- case 0x3007:
- return "B protocol combination not supported";
- case 0x3008:
- return "NCPI not supported";
- case 0x3009:
- return "CIP Value unknown";
- case 0x300A:
- return "Flags not supported (reserved bits)";
- case 0x300B:
- return "Facility not supported";
- case 0x300C:
- return "Data length not supported by current protocol";
- case 0x300D:
- return "Reset procedure not supported by current protocol";
-
-/*-- informations about the clearing of a physical connection -----*/
- case 0x3301:
- return "Protocol error layer 1 (broken line or B-channel removed by signalling protocol)";
- case 0x3302:
- return "Protocol error layer 2";
- case 0x3303:
- return "Protocol error layer 3";
- case 0x3304:
- return "Another application got that call";
-/*-- T.30 specific reasons -----*/
- case 0x3311:
- return "Connecting not successful (remote station is no FAX G3 machine)";
- case 0x3312:
- return "Connecting not successful (training error)";
- case 0x3313:
- return "Disconnected before transfer (remote station does not support transfer mode, e.g. resolution)";
- case 0x3314:
- return "Disconnected during transfer (remote abort)";
- case 0x3315:
- return "Disconnected during transfer (remote procedure error, e.g. unsuccessful repetition of T.30 commands)";
- case 0x3316:
- return "Disconnected during transfer (local tx data underrun)";
- case 0x3317:
- return "Disconnected during transfer (local rx data overflow)";
- case 0x3318:
- return "Disconnected during transfer (local abort)";
- case 0x3319:
- return "Illegal parameter coding (e.g. SFF coding error)";
-
-/*-- disconnect causes from the network according to ETS 300 102-1/Q.931 -----*/
- case 0x3481: return "Unallocated (unassigned) number";
- case 0x3482: return "No route to specified transit network";
- case 0x3483: return "No route to destination";
- case 0x3486: return "Channel unacceptable";
- case 0x3487:
- return "Call awarded and being delivered in an established channel";
- case 0x3490: return "Normal call clearing";
- case 0x3491: return "User busy";
- case 0x3492: return "No user responding";
- case 0x3493: return "No answer from user (user alerted)";
- case 0x3495: return "Call rejected";
- case 0x3496: return "Number changed";
- case 0x349A: return "Non-selected user clearing";
- case 0x349B: return "Destination out of order";
- case 0x349C: return "Invalid number format";
- case 0x349D: return "Facility rejected";
- case 0x349E: return "Response to STATUS ENQUIRY";
- case 0x349F: return "Normal, unspecified";
- case 0x34A2: return "No circuit / channel available";
- case 0x34A6: return "Network out of order";
- case 0x34A9: return "Temporary failure";
- case 0x34AA: return "Switching equipment congestion";
- case 0x34AB: return "Access information discarded";
- case 0x34AC: return "Requested circuit / channel not available";
- case 0x34AF: return "Resources unavailable, unspecified";
- case 0x34B1: return "Quality of service unavailable";
- case 0x34B2: return "Requested facility not subscribed";
- case 0x34B9: return "Bearer capability not authorized";
- case 0x34BA: return "Bearer capability not presently available";
- case 0x34BF: return "Service or option not available, unspecified";
- case 0x34C1: return "Bearer capability not implemented";
- case 0x34C2: return "Channel type not implemented";
- case 0x34C5: return "Requested facility not implemented";
- case 0x34C6: return "Only restricted digital information bearer capability is available";
- case 0x34CF: return "Service or option not implemented, unspecified";
- case 0x34D1: return "Invalid call reference value";
- case 0x34D2: return "Identified channel does not exist";
- case 0x34D3: return "A suspended call exists, but this call identity does not";
- case 0x34D4: return "Call identity in use";
- case 0x34D5: return "No call suspended";
- case 0x34D6: return "Call having the requested call identity has been cleared";
- case 0x34D8: return "Incompatible destination";
- case 0x34DB: return "Invalid transit network selection";
- case 0x34DF: return "Invalid message, unspecified";
- case 0x34E0: return "Mandatory information element is missing";
- case 0x34E1: return "Message type non-existent or not implemented";
- case 0x34E2: return "Message not compatible with call state or message type non-existent or not implemented";
- case 0x34E3: return "Information element non-existent or not implemented";
- case 0x34E4: return "Invalid information element contents";
- case 0x34E5: return "Message not compatible with call state";
- case 0x34E6: return "Recovery on timer expiry";
- case 0x34EF: return "Protocol error, unspecified";
- case 0x34FF: return "Interworking, unspecified";
-
- default: return "No additional information";
- }
-}
-#endif
-
typedef struct {
int typ;
size_t off;
@@ -1073,4 +874,3 @@ EXPORT_SYMBOL(capi_cmsg_header);
EXPORT_SYMBOL(capi_cmd2str);
EXPORT_SYMBOL(capi_cmsg2str);
EXPORT_SYMBOL(capi_message2str);
-EXPORT_SYMBOL(capi_info2str);
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index d9edcc94c2a8..97465ac5a2d5 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -16,7 +16,7 @@ config ISDN_DRV_HISAX
also to the configuration option of the driver for your particular
card, below.
-if ISDN_DRV_HISAX!=n
+if ISDN_DRV_HISAX
comment "D-channel protocol features"
@@ -348,10 +348,6 @@ config HISAX_ENTERNOW_PCI
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
-endif
-
-if ISDN_DRV_HISAX
-
config HISAX_DEBUG
bool "HiSax debugging"
help
@@ -420,11 +416,6 @@ config HISAX_FRITZ_PCIPNP
(the latter also needs you to select "ISA Plug and Play support"
from the menu "Plug and Play configuration")
-config HISAX_AVM_A1_PCMCIA
- bool
- depends on HISAX_AVM_A1_CS
- default y
-
endif
endmenu
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 414dbf6da89a..fc9f9d03fa13 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -197,25 +197,6 @@ typedef struct _hfc4s8s_hw {
-/***************************/
-/* inline function defines */
-/***************************/
-#ifdef HISAX_HFC4S8S_PCIMEM /* inline functions memory mapped */
-
-/* memory write and dummy IO read to avoid PCI byte merge problems */
-#define Write_hfc8(a, b, c) {(*((volatile u_char *)(a->membase + b)) = c); inb(a->iobase + 4);}
-/* memory write without dummy IO access for fifo data access */
-#define fWrite_hfc8(a, b, c) (*((volatile u_char *)(a->membase + b)) = c)
-#define Read_hfc8(a, b) (*((volatile u_char *)(a->membase + b)))
-#define Write_hfc16(a, b, c) (*((volatile unsigned short *)(a->membase + b)) = c)
-#define Read_hfc16(a, b) (*((volatile unsigned short *)(a->membase + b)))
-#define Write_hfc32(a, b, c) (*((volatile unsigned long *)(a->membase + b)) = c)
-#define Read_hfc32(a, b) (*((volatile unsigned long *)(a->membase + b)))
-#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
-#define PCI_ENA_MEMIO 0x03
-
-#else
-
/* inline functions io mapped */
static inline void
SetRegAddr(hfc4s8s_hw *a, u_char b)
@@ -306,8 +287,6 @@ wait_busy(hfc4s8s_hw *a)
#define PCI_ENA_REGIO 0x01
-#endif /* HISAX_HFC4S8S_PCIMEM */
-
/******************************************************/
/* function to read critical counter registers that */
/* may be updated by the chip during read */
@@ -724,26 +703,15 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
return;
} else {
		/* read erroneous D frame */
-
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
fRead_hfc32(l1p->hw);
-#endif
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
- fRead_hfc8(l1p->hw);
-#endif
+ fRead_hfc8(l1p->hw);
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
wait_busy(l1p->hw);
@@ -753,27 +721,16 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
cp = skb->data;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- *((unsigned long *) cp) =
- Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
-#endif
cp += 4;
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- *cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
- *cp++ = fRead_hfc8(l1p->hw);
-#endif
+ *cp++ = fRead_hfc8(l1p->hw);
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
wait_busy(l1p->hw);
@@ -859,28 +816,17 @@ rx_b_frame(struct hfc4s8s_btype *bch)
wait_busy(l1->hw);
return;
}
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- *((unsigned long *) bch->rx_ptr) =
- Read_hfc32(l1->hw, A_FIFO_DATA0);
-#else
*((unsigned long *) bch->rx_ptr) =
fRead_hfc32(l1->hw);
-#endif
bch->rx_ptr += 4;
z1 -= 4;
}
while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- *(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
-#else
- *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-#endif
+ *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
if (hdlc_complete) {
/* increment f counter */
@@ -940,29 +886,17 @@ tx_d_frame(struct hfc4s8s_l1 *l1p)
if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
cp = skb->data;
cnt = skb->len;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
- *(unsigned long *) cp);
-#else
SetRegAddr(l1p->hw, A_FIFO_DATA0);
fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
-#endif
cp += 4;
cnt -= 4;
}
-#ifdef HISAX_HFC4S8S_PCIMEM
- while (cnt--)
- fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
-#else
while (cnt--)
fWrite_hfc8(l1p->hw, *cp++);
-#endif
l1p->tx_cnt = skb->truesize;
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
@@ -1037,26 +971,15 @@ tx_b_frame(struct hfc4s8s_btype *bch)
cp = skb->data + bch->tx_cnt;
bch->tx_cnt += cnt;
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc32(l1->hw, A_FIFO_DATA0,
- *(unsigned long *) cp);
-#else
fWrite_hfc32(l1->hw, *(unsigned long *) cp);
-#endif
cp += 4;
cnt -= 4;
}
while (cnt--)
-#ifdef HISAX_HFC4S8S_PCIMEM
- fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
-#else
- fWrite_hfc8(l1->hw, *cp++);
-#endif
+ fWrite_hfc8(l1->hw, *cp++);
if (bch->tx_cnt >= skb->len) {
if (bch->mode == L1_MODE_HDLC) {
@@ -1281,10 +1204,8 @@ hfc4s8s_interrupt(int intno, void *dev_id)
if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
return IRQ_NONE;
-#ifndef HISAX_HFC4S8S_PCIMEM
	/* read the currently selected register */
old_ioreg = GetRegAddr(hw);
-#endif
/* Layer 1 State change */
hw->mr.r_irq_statech |=
@@ -1292,9 +1213,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
if (!
(b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
&& !hw->mr.r_irq_statech) {
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
-#endif
return IRQ_NONE;
}
@@ -1322,9 +1241,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
/* queue the request to allow other cards to interrupt */
schedule_work(&hw->tqueue);
-#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
-#endif
return IRQ_HANDLED;
} /* hfc4s8s_interrupt */
@@ -1471,13 +1388,8 @@ static void
release_pci_ports(hfc4s8s_hw *hw)
{
pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
- if (hw->membase)
- iounmap((void *) hw->membase);
-#else
if (hw->iobase)
release_region(hw->iobase, 8);
-#endif
}
/*****************************************/
@@ -1486,11 +1398,7 @@ release_pci_ports(hfc4s8s_hw *hw)
static void
enable_pci_ports(hfc4s8s_hw *hw)
{
-#ifdef HISAX_HFC4S8S_PCIMEM
- pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
-#else
pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-#endif
}
/*************************************/
@@ -1561,15 +1469,9 @@ setup_instance(hfc4s8s_hw *hw)
hw->irq);
goto out;
}
-#ifdef HISAX_HFC4S8S_PCIMEM
- printk(KERN_INFO
- "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
- hw->hw_membase, hw->irq);
-#else
printk(KERN_INFO
"HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
hw->iobase, hw->irq);
-#endif
hfc_hardware_enable(hw, 1, 0);
@@ -1614,17 +1516,12 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->irq = pdev->irq;
hw->iobase = pci_resource_start(pdev, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
- hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
- hw->membase = ioremap((ulong) hw->hw_membase, 256);
-#else
if (!request_region(hw->iobase, 8, hw->card_name)) {
printk(KERN_INFO
"HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
goto out;
}
-#endif
pci_set_drvdata(pdev, hw);
err = setup_instance(hw);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a5da511e3c9a..61ac63237446 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -634,7 +634,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_IPPP_FILTER
case PPPIOCSPASS:
{
- struct sock_fprog fprog;
+ struct sock_fprog_kern fprog;
struct sock_filter *code;
int err, len = get_filter(argp, &code);
@@ -653,7 +653,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
}
case PPPIOCSACTIVE:
{
- struct sock_fprog fprog;
+ struct sock_fprog_kern fprog;
struct sock_filter *code;
int err, len = get_filter(argp, &code);
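
struct sock_fprog_kern has the same len/filter layout as the UAPI struct sock_fprog, but its filter pointer is kernel memory, which is what the attach helper expects once get_filter() has copied the program in from userspace. A hedged sketch of how the handler is expected to continue under that assumption (abbreviated, not the full ioctl path):

	struct sock_fprog_kern fprog;
	struct sock_filter *code;
	int err, len = get_filter(argp, &code);	/* copies the BPF program from userspace */

	if (len < 0)
		return len;

	fprog.len = len;
	fprog.filter = code;	/* kernel pointer, hence sock_fprog_kern */
	err = sk_unattached_filter_create(&is->pass_filter, &fprog);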
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 2c0d2c2bf946..9f454d76cc06 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -287,11 +287,9 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
p = frame;
/* restart timer */
- if ((int)(hc->keep_tl.expires-jiffies) < 5 * HZ) {
- del_timer(&hc->keep_tl);
- hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
- add_timer(&hc->keep_tl);
- } else
+ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+ mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
+ else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
if (debug & DEBUG_L1OIP_MSG)
@@ -621,11 +619,9 @@ multiframe:
goto multiframe;
/* restart timer */
- if ((int)(hc->timeout_tl.expires-jiffies) < 5 * HZ || !hc->timeout_on) {
+ if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
hc->timeout_on = 1;
- del_timer(&hc->timeout_tl);
- hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
- add_timer(&hc->timeout_tl);
+ mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
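
Two idioms are at work in the timer hunks above: time_before() compares jiffies values with wraparound handled correctly (the deleted signed subtraction was the hand-rolled version), and mod_timer() atomically replaces the racy del_timer() / set expires / add_timer() sequence. A sketch of the resulting shape, with an illustrative helper name:

	/* Sketch: restart a keepalive timer safely. */
	static void keepalive_kick(struct timer_list *t, unsigned long interval)
	{
		if (time_before(t->expires, jiffies + 5 * HZ))
			mod_timer(t, jiffies + interval);	/* (re)arm atomically */
		else
			t->expires = jiffies + interval;	/* not due soon: just adjust */
	}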
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 089841ca180f..a1b044e7eaad 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -300,16 +300,6 @@ config LEDS_PCA963X
LED driver chip accessed via the I2C bus. Supported
devices include PCA9633 and PCA9634
-config LEDS_PCA9685
- tristate "LED support for PCA9685 I2C chip"
- depends on LEDS_CLASS
- depends on I2C
- help
- This option enables support for LEDs connected to the PCA9685
- LED driver chip accessed via the I2C bus.
- The PCA9685 offers 12-bit PWM (4095 levels of brightness) on
- 16 individual channels.
-
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8b4c956e11ba..79c5155199a7 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
-obj-$(CONFIG_LEDS_PCA9685) += leds-pca9685.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c
index e5c57389efd6..c36acaf566a6 100644
--- a/drivers/leds/dell-led.c
+++ b/drivers/leds/dell-led.c
@@ -15,12 +15,15 @@
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/dell-led.h>
MODULE_AUTHOR("Louis Davis/Jim Dailey");
MODULE_DESCRIPTION("Dell LED Control Driver");
MODULE_LICENSE("GPL");
#define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396"
+#define DELL_APP_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
/* Error Result Codes: */
@@ -39,6 +42,149 @@ MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
#define CMD_LED_OFF 17
#define CMD_LED_BLINK 18
+struct app_wmi_args {
+ u16 class;
+ u16 selector;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+ u32 arg4;
+ u32 res1;
+ u32 res2;
+ u32 res3;
+ u32 res4;
+ char dummy[92];
+};
+
+#define GLOBAL_MIC_MUTE_ENABLE 0x364
+#define GLOBAL_MIC_MUTE_DISABLE 0x365
+
+struct dell_bios_data_token {
+ u16 tokenid;
+ u16 location;
+ u16 value;
+};
+
+struct __attribute__ ((__packed__)) dell_bios_calling_interface {
+ struct dmi_header header;
+ u16 cmd_io_addr;
+ u8 cmd_io_code;
+ u32 supported_cmds;
+ struct dell_bios_data_token damap[];
+};
+
+static struct dell_bios_data_token dell_mic_tokens[2];
+
+static int dell_wmi_perform_query(struct app_wmi_args *args)
+{
+ struct app_wmi_args *bios_return;
+ union acpi_object *obj;
+ struct acpi_buffer input;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ u32 rc = -EINVAL;
+
+ input.length = 128;
+ input.pointer = args;
+
+ status = wmi_evaluate_method(DELL_APP_GUID, 0, 1, &input, &output);
+ if (!ACPI_SUCCESS(status))
+ goto err_out0;
+
+ obj = output.pointer;
+ if (!obj)
+ goto err_out0;
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ goto err_out1;
+
+ bios_return = (struct app_wmi_args *)obj->buffer.pointer;
+ rc = bios_return->res1;
+ if (rc)
+ goto err_out1;
+
+ memcpy(args, bios_return, sizeof(struct app_wmi_args));
+ rc = 0;
+
+ err_out1:
+ kfree(obj);
+ err_out0:
+ return rc;
+}
+
+static void __init find_micmute_tokens(const struct dmi_header *dm, void *dummy)
+{
+ struct dell_bios_calling_interface *calling_interface;
+ struct dell_bios_data_token *token;
+ int token_size = sizeof(struct dell_bios_data_token);
+ int i = 0;
+
+ if (dm->type == 0xda && dm->length > 17) {
+ calling_interface = container_of(dm,
+ struct dell_bios_calling_interface, header);
+
+ token = &calling_interface->damap[i];
+ while (token->tokenid != 0xffff) {
+ if (token->tokenid == GLOBAL_MIC_MUTE_DISABLE)
+ memcpy(&dell_mic_tokens[0], token, token_size);
+ else if (token->tokenid == GLOBAL_MIC_MUTE_ENABLE)
+ memcpy(&dell_mic_tokens[1], token, token_size);
+
+ i++;
+ token = &calling_interface->damap[i];
+ }
+ }
+}
+
+static int dell_micmute_led_set(int state)
+{
+ struct app_wmi_args args;
+ struct dell_bios_data_token *token;
+
+ if (!wmi_has_guid(DELL_APP_GUID))
+ return -ENODEV;
+
+ if (state == 0 || state == 1)
+ token = &dell_mic_tokens[state];
+ else
+ return -EINVAL;
+
+ memset(&args, 0, sizeof(struct app_wmi_args));
+
+ args.class = 1;
+ args.arg1 = token->location;
+ args.arg2 = token->value;
+
+ dell_wmi_perform_query(&args);
+
+ return state;
+}
+
+int dell_app_wmi_led_set(int whichled, int on)
+{
+ int state = 0;
+
+ switch (whichled) {
+ case DELL_LED_MICMUTE:
+ state = dell_micmute_led_set(on);
+ break;
+ default:
+ pr_warn("led type %x is not supported\n", whichled);
+ break;
+ }
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(dell_app_wmi_led_set);
+
+static int __init dell_micmute_led_init(void)
+{
+ memset(dell_mic_tokens, 0, sizeof(struct dell_bios_data_token) * 2);
+ dmi_walk(find_micmute_tokens, NULL);
+
+ return 0;
+}
+
struct bios_args {
unsigned char length;
unsigned char result_code;
@@ -181,21 +327,32 @@ static int __init dell_led_init(void)
{
int error = 0;
- if (!wmi_has_guid(DELL_LED_BIOS_GUID))
+ if (!wmi_has_guid(DELL_LED_BIOS_GUID) && !wmi_has_guid(DELL_APP_GUID))
return -ENODEV;
- error = led_off();
- if (error != 0)
- return -ENODEV;
+ if (wmi_has_guid(DELL_APP_GUID))
+ error = dell_micmute_led_init();
- return led_classdev_register(NULL, &dell_led);
+ if (wmi_has_guid(DELL_LED_BIOS_GUID)) {
+ error = led_off();
+ if (error != 0)
+ return -ENODEV;
+
+ error = led_classdev_register(NULL, &dell_led);
+ }
+
+ return error;
}
static void __exit dell_led_exit(void)
{
- led_classdev_unregister(&dell_led);
+ int error = 0;
- led_off();
+ if (wmi_has_guid(DELL_LED_BIOS_GUID)) {
+ error = led_off();
+ if (error == 0)
+ led_classdev_unregister(&dell_led);
+ }
}
module_init(dell_led_init);
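
The new export is meant to be driven from outside this file, typically an audio driver mirroring capture-mute state. A hypothetical consumer, assuming only the dell_app_wmi_led_set() prototype and the DELL_LED_MICMUTE constant used above:

	#include <linux/dell-led.h>

	/* Sketch: mirror the capture-mute state onto the mic-mute LED. */
	static void update_micmute_led(bool muted)
	{
		dell_app_wmi_led_set(DELL_LED_MICMUTE, muted ? 1 : 0);
	}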
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index d1e1bca90d11..c2def5551ce1 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -130,10 +130,9 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
struct device_node *nproot, *np;
int iset = 0;
- nproot = of_node_get(pdev->dev.parent->of_node);
- if (!nproot)
+ if (!pdev->dev.parent->of_node)
return -ENODEV;
- nproot = of_find_node_by_name(nproot, "leds");
+ nproot = of_get_child_by_name(pdev->dev.parent->of_node, "leds");
if (!nproot) {
dev_err(&pdev->dev, "failed to find leds node\n");
return -ENODEV;
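
The substitution above is more than style: of_find_node_by_name() searches the flat tree onward from the given node (and consumes its reference), so it can match a "leds" node belonging to some other device, while of_get_child_by_name() inspects direct children only and takes a reference the caller must drop. A minimal sketch of the corrected lookup:

	/* Sketch: find this PMIC's own "leds" child node. */
	struct device_node *nproot;

	nproot = of_get_child_by_name(pdev->dev.parent->of_node, "leds");
	if (!nproot)
		return -ENODEV;
	/* ... iterate children, parse LED properties ... */
	of_node_put(nproot);	/* balance the reference from of_get_child_by_name() */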
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 86b5bdb0c773..5036d7b4f82e 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -120,13 +120,10 @@ static int adp5520_led_probe(struct platform_device *pdev)
led = devm_kzalloc(&pdev->dev, sizeof(*led) * pdata->num_leds,
GFP_KERNEL);
- if (led == NULL) {
- dev_err(&pdev->dev, "failed to alloc memory\n");
+ if (!led)
return -ENOMEM;
- }
ret = adp5520_led_prepare(pdev);
-
if (ret) {
dev_err(&pdev->dev, "failed to write\n");
return ret;
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index fb5a3472d614..6078c15d3452 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -678,10 +678,8 @@ static int bd2802_probe(struct i2c_client *client,
int ret, i;
led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL);
- if (!led) {
- dev_err(&client->dev, "failed to allocate driver data\n");
+ if (!led)
return -ENOMEM;
- }
led->client = client;
pdata = led->pdata = dev_get_platdata(&client->dev);
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 35dffb100388..54b8b5216b8b 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -108,10 +108,8 @@ static int da903x_led_probe(struct platform_device *pdev)
}
led = devm_kzalloc(&pdev->dev, sizeof(struct da903x_led), GFP_KERNEL);
- if (led == NULL) {
- dev_err(&pdev->dev, "failed to alloc memory for LED%d\n", id);
+ if (!led)
return -ENOMEM;
- }
led->cdev.name = pdata->name;
led->cdev.default_trigger = pdata->default_trigger;
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
index 01486adc7f8b..e4da1f460ac5 100644
--- a/drivers/leds/leds-da9052.c
+++ b/drivers/leds/leds-da9052.c
@@ -126,8 +126,7 @@ static int da9052_led_probe(struct platform_device *pdev)
led = devm_kzalloc(&pdev->dev,
sizeof(struct da9052_led) * pled->num_leds,
GFP_KERNEL);
- if (led == NULL) {
- dev_err(&pdev->dev, "Failed to alloc memory\n");
+ if (!led) {
error = -ENOMEM;
goto err;
}
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index cb5ed82994ba..9e1716f8098c 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -1,5 +1,5 @@
/*
- * lp5523.c - LP5523 LED Driver
+ * lp5523.c - LP5523, LP55231 LED Driver
*
* Copyright (C) 2010 Nokia Corporation
* Copyright (C) 2012 Texas Instruments
@@ -814,6 +814,7 @@ MODULE_DEVICE_TABLE(i2c, lp5523_id);
#ifdef CONFIG_OF
static const struct of_device_id of_lp5523_leds_match[] = {
{ .compatible = "national,lp5523", },
+ { .compatible = "ti,lp55231", },
{},
};
diff --git a/drivers/leds/leds-pca9685.c b/drivers/leds/leds-pca9685.c
deleted file mode 100644
index 6e1ef3a9d6ef..000000000000
--- a/drivers/leds/leds-pca9685.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com>
- *
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Based on leds-pca963x.c driver by
- * Peter Meerwald <p.meerwald@bct-electronic.com>
- *
- * Driver for the NXP PCA9685 12-Bit PWM LED driver chip.
- *
- */
-
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-
-#include <linux/platform_data/leds-pca9685.h>
-
-/* Register Addresses */
-#define PCA9685_MODE1 0x00
-#define PCA9685_MODE2 0x01
-#define PCA9685_LED0_ON_L 0x06
-#define PCA9685_ALL_LED_ON_L 0xFA
-
-/* MODE1 Register */
-#define PCA9685_ALLCALL 0x00
-#define PCA9685_SLEEP 0x04
-#define PCA9685_AI 0x05
-
-/* MODE2 Register */
-#define PCA9685_INVRT 0x04
-#define PCA9685_OUTDRV 0x02
-
-static const struct i2c_device_id pca9685_id[] = {
- { "pca9685", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pca9685_id);
-
-struct pca9685_led {
- struct i2c_client *client;
- struct work_struct work;
- u16 brightness;
- struct led_classdev led_cdev;
- int led_num; /* 0-15 */
- char name[32];
-};
-
-static void pca9685_write_msg(struct i2c_client *client, u8 *buf, u8 len)
-{
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0x00,
- .len = len,
- .buf = buf
- };
- i2c_transfer(client->adapter, &msg, 1);
-}
-
-static void pca9685_all_off(struct i2c_client *client)
-{
- u8 i2c_buffer[5] = {PCA9685_ALL_LED_ON_L, 0x00, 0x00, 0x00, 0x10};
- pca9685_write_msg(client, i2c_buffer, 5);
-}
-
-static void pca9685_led_work(struct work_struct *work)
-{
- struct pca9685_led *pca9685;
- u8 i2c_buffer[5];
-
- pca9685 = container_of(work, struct pca9685_led, work);
- i2c_buffer[0] = PCA9685_LED0_ON_L + 4 * pca9685->led_num;
- /*
- * 4095 is the maximum brightness, so we set the ON time to 0x1000
- * which disables the PWM generator for that LED
- */
- if (pca9685->brightness == 4095)
- *((__le16 *)(i2c_buffer+1)) = cpu_to_le16(0x1000);
- else
- *((__le16 *)(i2c_buffer+1)) = 0x0000;
-
- if (pca9685->brightness == 0)
- *((__le16 *)(i2c_buffer+3)) = cpu_to_le16(0x1000);
- else if (pca9685->brightness == 4095)
- *((__le16 *)(i2c_buffer+3)) = 0x0000;
- else
- *((__le16 *)(i2c_buffer+3)) = cpu_to_le16(pca9685->brightness);
-
- pca9685_write_msg(pca9685->client, i2c_buffer, 5);
-}
-
-static void pca9685_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct pca9685_led *pca9685;
- pca9685 = container_of(led_cdev, struct pca9685_led, led_cdev);
- pca9685->brightness = value;
-
- schedule_work(&pca9685->work);
-}
-
-static int pca9685_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct pca9685_led *pca9685;
- struct pca9685_platform_data *pdata;
- int err;
- u8 i;
-
- pdata = dev_get_platdata(&client->dev);
- if (pdata) {
- if (pdata->leds.num_leds < 1 || pdata->leds.num_leds > 15) {
- dev_err(&client->dev, "board info must claim 1-16 LEDs");
- return -EINVAL;
- }
- }
-
- pca9685 = devm_kzalloc(&client->dev, 16 * sizeof(*pca9685), GFP_KERNEL);
- if (!pca9685)
- return -ENOMEM;
-
- i2c_set_clientdata(client, pca9685);
- pca9685_all_off(client);
-
- for (i = 0; i < 16; i++) {
- pca9685[i].client = client;
- pca9685[i].led_num = i;
- pca9685[i].name[0] = '\0';
- if (pdata && i < pdata->leds.num_leds) {
- if (pdata->leds.leds[i].name)
- strncpy(pca9685[i].name,
- pdata->leds.leds[i].name,
- sizeof(pca9685[i].name)-1);
- if (pdata->leds.leds[i].default_trigger)
- pca9685[i].led_cdev.default_trigger =
- pdata->leds.leds[i].default_trigger;
- }
- if (strlen(pca9685[i].name) == 0) {
- /*
- * Write adapter and address to the name as well.
- * Otherwise multiple chips attached to one host would
- * not work.
- */
- snprintf(pca9685[i].name, sizeof(pca9685[i].name),
- "pca9685:%d:x%.2x:%d",
- client->adapter->nr, client->addr, i);
- }
- pca9685[i].led_cdev.name = pca9685[i].name;
- pca9685[i].led_cdev.max_brightness = 0xfff;
- pca9685[i].led_cdev.brightness_set = pca9685_led_set;
-
- INIT_WORK(&pca9685[i].work, pca9685_led_work);
- err = led_classdev_register(&client->dev, &pca9685[i].led_cdev);
- if (err < 0)
- goto exit;
- }
-
- if (pdata)
- i2c_smbus_write_byte_data(client, PCA9685_MODE2,
- pdata->outdrv << PCA9685_OUTDRV |
- pdata->inverted << PCA9685_INVRT);
- else
- i2c_smbus_write_byte_data(client, PCA9685_MODE2,
- PCA9685_TOTEM_POLE << PCA9685_OUTDRV);
- /* Enable Auto-Increment, enable oscillator, ALLCALL/SUBADDR disabled */
- i2c_smbus_write_byte_data(client, PCA9685_MODE1, BIT(PCA9685_AI));
-
- return 0;
-
-exit:
- while (i--) {
- led_classdev_unregister(&pca9685[i].led_cdev);
- cancel_work_sync(&pca9685[i].work);
- }
- return err;
-}
-
-static int pca9685_remove(struct i2c_client *client)
-{
- struct pca9685_led *pca9685 = i2c_get_clientdata(client);
- u8 i;
-
- for (i = 0; i < 16; i++) {
- led_classdev_unregister(&pca9685[i].led_cdev);
- cancel_work_sync(&pca9685[i].work);
- }
- pca9685_all_off(client);
- return 0;
-}
-
-static struct i2c_driver pca9685_driver = {
- .driver = {
- .name = "leds-pca9685",
- .owner = THIS_MODULE,
- },
- .probe = pca9685_probe,
- .remove = pca9685_remove,
- .id_table = pca9685_id,
-};
-
-module_i2c_driver(pca9685_driver);
-
-MODULE_AUTHOR("Maximilian Güntner <maximilian.guentner@gmail.com>");
-MODULE_DESCRIPTION("PCA9685 LED Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 7d0aaed1e23a..d672bb4480f6 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -69,6 +69,10 @@ static void led_pwm_set(struct led_classdev *led_cdev,
duty *= brightness;
do_div(duty, max);
+
+ if (led_dat->active_low)
+ duty = led_dat->period - duty;
+
led_dat->duty = duty;
if (led_dat->can_sleep)
@@ -92,55 +96,78 @@ static void led_pwm_cleanup(struct led_pwm_priv *priv)
}
}
-static int led_pwm_create_of(struct platform_device *pdev,
- struct led_pwm_priv *priv)
+static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
+ struct led_pwm *led, struct device_node *child)
{
- struct device_node *child;
+ struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
int ret;
- for_each_child_of_node(pdev->dev.of_node, child) {
- struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
+ led_data->active_low = led->active_low;
+ led_data->cdev.name = led->name;
+ led_data->cdev.default_trigger = led->default_trigger;
+ led_data->cdev.brightness_set = led_pwm_set;
+ led_data->cdev.brightness = LED_OFF;
+ led_data->cdev.max_brightness = led->max_brightness;
+ led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
- led_dat->cdev.name = of_get_property(child, "label",
- NULL) ? : child->name;
+ if (child)
+ led_data->pwm = devm_of_pwm_get(dev, child, NULL);
+ else
+ led_data->pwm = devm_pwm_get(dev, led->name);
+ if (IS_ERR(led_data->pwm)) {
+ ret = PTR_ERR(led_data->pwm);
+ dev_err(dev, "unable to request PWM for %s: %d\n",
+ led->name, ret);
+ return ret;
+ }
- led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
- if (IS_ERR(led_dat->pwm)) {
- dev_err(&pdev->dev, "unable to request PWM for %s\n",
- led_dat->cdev.name);
- ret = PTR_ERR(led_dat->pwm);
- goto err;
- }
- /* Get the period from PWM core */
- led_dat->period = pwm_get_period(led_dat->pwm);
+ if (child)
+ led_data->period = pwm_get_period(led_data->pwm);
- led_dat->cdev.default_trigger = of_get_property(child,
- "linux,default-trigger", NULL);
- of_property_read_u32(child, "max-brightness",
- &led_dat->cdev.max_brightness);
+ led_data->can_sleep = pwm_can_sleep(led_data->pwm);
+ if (led_data->can_sleep)
+ INIT_WORK(&led_data->work, led_pwm_work);
- led_dat->cdev.brightness_set = led_pwm_set;
- led_dat->cdev.brightness = LED_OFF;
- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_data->period = pwm_get_period(led_data->pwm);
+ if (!led_data->period && (led->pwm_period_ns > 0))
+ led_data->period = led->pwm_period_ns;
- led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
- if (led_dat->can_sleep)
- INIT_WORK(&led_dat->work, led_pwm_work);
+ ret = led_classdev_register(dev, &led_data->cdev);
+ if (ret == 0) {
+ priv->num_leds++;
+ } else {
+ dev_err(dev, "failed to register PWM led for %s: %d\n",
+ led->name, ret);
+ }
+
+ return ret;
+}
- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register for %s\n",
- led_dat->cdev.name);
+static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv)
+{
+ struct device_node *child;
+ struct led_pwm led;
+ int ret = 0;
+
+ memset(&led, 0, sizeof(led));
+
+ for_each_child_of_node(dev->of_node, child) {
+ led.name = of_get_property(child, "label", NULL) ? :
+ child->name;
+
+ led.default_trigger = of_get_property(child,
+ "linux,default-trigger", NULL);
+ led.active_low = of_property_read_bool(child, "active-low");
+ of_property_read_u32(child, "max-brightness",
+ &led.max_brightness);
+
+ ret = led_pwm_add(dev, priv, &led, child);
+ if (ret) {
of_node_put(child);
- goto err;
+ break;
}
- priv->num_leds++;
}
- return 0;
-err:
- led_pwm_cleanup(priv);
-
return ret;
}
@@ -166,51 +193,23 @@ static int led_pwm_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < count; i++) {
- struct led_pwm *cur_led = &pdata->leds[i];
- struct led_pwm_data *led_dat = &priv->leds[i];
-
- led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
- if (IS_ERR(led_dat->pwm)) {
- ret = PTR_ERR(led_dat->pwm);
- dev_err(&pdev->dev,
- "unable to request PWM for %s\n",
- cur_led->name);
- goto err;
- }
-
- led_dat->cdev.name = cur_led->name;
- led_dat->cdev.default_trigger = cur_led->default_trigger;
- led_dat->active_low = cur_led->active_low;
- led_dat->period = cur_led->pwm_period_ns;
- led_dat->cdev.brightness_set = led_pwm_set;
- led_dat->cdev.brightness = LED_OFF;
- led_dat->cdev.max_brightness = cur_led->max_brightness;
- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
-
- led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
- if (led_dat->can_sleep)
- INIT_WORK(&led_dat->work, led_pwm_work);
-
- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
- if (ret < 0)
- goto err;
+ ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i],
+ NULL);
+ if (ret)
+ break;
}
- priv->num_leds = count;
} else {
- ret = led_pwm_create_of(pdev, priv);
- if (ret)
- return ret;
+ ret = led_pwm_create_of(&pdev->dev, priv);
+ }
+
+ if (ret) {
+ led_pwm_cleanup(priv);
+ return ret;
}
platform_set_drvdata(pdev, priv);
return 0;
-
-err:
- priv->num_leds = i;
- led_pwm_cleanup(priv);
-
- return ret;
}
static int led_pwm_remove(struct platform_device *pdev)
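
Two behavioural points in the rework above are worth pinning down. First, led_pwm_set() now inverts the duty cycle for active-low LEDs after the brightness scaling; second, a platform-data period (pwm_period_ns) is only used as a fallback when the PWM core reports no period. A self-contained sketch of the duty arithmetic, mirroring the driver's use of do_div():

    #include <linux/types.h>
    #include <asm/div64.h>

    /* Map a brightness level onto a PWM duty cycle (all times in ns). */
    static unsigned long long brightness_to_duty(unsigned long long period,
                                                 unsigned int brightness,
                                                 unsigned int max,
                                                 bool active_low)
    {
            unsigned long long duty = period * brightness;

            do_div(duty, max);      /* duty = period * brightness / max */

            if (active_low)         /* full brightness => duty 0 on the wire */
                    duty = period - duty;

            return duty;
    }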
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 28988b7b4fab..785eb53a87fc 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -76,10 +76,8 @@ static int s3c24xx_led_probe(struct platform_device *dev)
led = devm_kzalloc(&dev->dev, sizeof(struct s3c24xx_gpio_led),
GFP_KERNEL);
- if (led == NULL) {
- dev_err(&dev->dev, "No memory for device\n");
+ if (!led)
return -ENOMEM;
- }
platform_set_drvdata(dev, led);
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 388632d23d44..0b8cc4a021a6 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -135,10 +135,8 @@ static int sunfire_led_generic_probe(struct platform_device *pdev,
}
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
- if (!p) {
- dev_err(&pdev->dev, "Could not allocate struct sunfire_drvdata\n");
+ if (!p)
return -ENOMEM;
- }
for (i = 0; i < NUM_LEDS_PER_BOARD; i++) {
struct led_classdev *lp = &p->leds[i].led_cdev;
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 1c3ee9fcaf34..aec0f02b6b3e 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
*/
void ledtrig_cpu(enum cpu_led_event ledevt)
{
- struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig);
+ struct led_trigger_cpu *trig = this_cpu_ptr(&cpu_trig);
/* Locate the correct CPU LED */
switch (ledevt) {
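
__get_cpu_var() was on its way out of the tree around this release; when the address of a per-CPU variable is wanted, the replacement is this_cpu_ptr(). A minimal sketch of the idiom, using a hypothetical per-CPU variable:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, example_counter);

    static void bump_this_cpu(void)
    {
            /* Old spelling: __get_cpu_var(example_counter)++; */
            int *p = this_cpu_ptr(&example_counter);

            (*p)++; /* safe only if we cannot migrate CPUs before the use */
    }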
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 23b4a3b28dbc..4eab93aa570b 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1257,7 +1257,8 @@ static unsigned int smu_fpoll(struct file *file, poll_table *wait)
if (pp->busy && pp->cmd.status != 1)
mask |= POLLIN;
spin_unlock_irqrestore(&pp->lock, flags);
- } if (pp->mode == smu_file_events) {
+ }
+ if (pp->mode == smu_file_events) {
/* Not yet implemented */
}
return mask;
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 7fe58b0ae8b4..b350fb86ff08 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -555,8 +555,18 @@ static void pm121_create_sys_fans(int loop_id)
pid_param.interval = PM121_SYS_INTERVAL;
pid_param.history_len = PM121_SYS_HISTORY_SIZE;
pid_param.itarget = param->itarget;
- pid_param.min = control->ops->get_min(control);
- pid_param.max = control->ops->get_max(control);
+ if (control)
+ {
+ pid_param.min = control->ops->get_min(control);
+ pid_param.max = control->ops->get_max(control);
+ } else {
+ /*
+ * This is probably not right; perhaps we should
+ * goto fail if control == NULL above?
+ */
+ pid_param.min = 0;
+ pid_param.max = 0;
+ }
wf_pid_init(&pm121_sys_state[loop_id]->pid, &pid_param);
@@ -571,7 +581,7 @@ static void pm121_create_sys_fans(int loop_id)
control the same control */
printk(KERN_WARNING "pm121: failed to set up %s loop "
"setting \"%s\" to max speed.\n",
- loop_names[loop_id], control->name);
+ loop_names[loop_id], control ? control->name : "uninitialized value");
if (control)
wf_control_set_max(control);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a8e66ae04f5..67f8b31e2054 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -669,17 +669,13 @@ static inline unsigned long file_page_offset(struct bitmap_storage *store,
/*
* return a pointer to the page in the filemap that contains the given bit
*
- * this lookup is complicated by the fact that the bitmap sb might be exactly
- * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
- * 0 or page 1
*/
static inline struct page *filemap_get_page(struct bitmap_storage *store,
unsigned long chunk)
{
if (file_page_index(store, chunk) >= store->file_pages)
return NULL;
- return store->filemap[file_page_index(store, chunk)
- - file_page_index(store, 0)];
+ return store->filemap[file_page_index(store, chunk)];
}
static int bitmap_storage_alloc(struct bitmap_storage *store,
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index 85f0b7074257..f752d12081ff 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -14,13 +14,17 @@
/*----------------------------------------------------------------*/
-struct dm_bio_prison {
+struct bucket {
spinlock_t lock;
+ struct hlist_head cells;
+};
+
+struct dm_bio_prison {
mempool_t *cell_pool;
unsigned nr_buckets;
unsigned hash_mask;
- struct hlist_head *cells;
+ struct bucket *buckets;
};
/*----------------------------------------------------------------*/
@@ -40,6 +44,12 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
static struct kmem_cache *_cell_cache;
+static void init_bucket(struct bucket *b)
+{
+ spin_lock_init(&b->lock);
+ INIT_HLIST_HEAD(&b->cells);
+}
+
/*
* @nr_cells should be the number of cells you want in use _concurrently_.
* Don't confuse it with the number of distinct keys.
@@ -49,13 +59,12 @@ struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
unsigned i;
uint32_t nr_buckets = calc_nr_buckets(nr_cells);
size_t len = sizeof(struct dm_bio_prison) +
- (sizeof(struct hlist_head) * nr_buckets);
+ (sizeof(struct bucket) * nr_buckets);
struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
if (!prison)
return NULL;
- spin_lock_init(&prison->lock);
prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
if (!prison->cell_pool) {
kfree(prison);
@@ -64,9 +73,9 @@ struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
prison->nr_buckets = nr_buckets;
prison->hash_mask = nr_buckets - 1;
- prison->cells = (struct hlist_head *) (prison + 1);
+ prison->buckets = (struct bucket *) (prison + 1);
for (i = 0; i < nr_buckets; i++)
- INIT_HLIST_HEAD(prison->cells + i);
+ init_bucket(prison->buckets + i);
return prison;
}
@@ -107,40 +116,44 @@ static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
(lhs->block == rhs->block);
}
-static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+static struct bucket *get_bucket(struct dm_bio_prison *prison,
+ struct dm_cell_key *key)
+{
+ return prison->buckets + hash_key(prison, key);
+}
+
+static struct dm_bio_prison_cell *__search_bucket(struct bucket *b,
struct dm_cell_key *key)
{
struct dm_bio_prison_cell *cell;
- hlist_for_each_entry(cell, bucket, list)
+ hlist_for_each_entry(cell, &b->cells, list)
if (keys_equal(&cell->key, key))
return cell;
return NULL;
}
-static void __setup_new_cell(struct dm_bio_prison *prison,
+static void __setup_new_cell(struct bucket *b,
struct dm_cell_key *key,
struct bio *holder,
- uint32_t hash,
struct dm_bio_prison_cell *cell)
{
memcpy(&cell->key, key, sizeof(cell->key));
cell->holder = holder;
bio_list_init(&cell->bios);
- hlist_add_head(&cell->list, prison->cells + hash);
+ hlist_add_head(&cell->list, &b->cells);
}
-static int __bio_detain(struct dm_bio_prison *prison,
+static int __bio_detain(struct bucket *b,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
- uint32_t hash = hash_key(prison, key);
struct dm_bio_prison_cell *cell;
- cell = __search_bucket(prison->cells + hash, key);
+ cell = __search_bucket(b, key);
if (cell) {
if (inmate)
bio_list_add(&cell->bios, inmate);
@@ -148,7 +161,7 @@ static int __bio_detain(struct dm_bio_prison *prison,
return 1;
}
- __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
+ __setup_new_cell(b, key, inmate, cell_prealloc);
*cell_result = cell_prealloc;
return 0;
}
@@ -161,10 +174,11 @@ static int bio_detain(struct dm_bio_prison *prison,
{
int r;
unsigned long flags;
+ struct bucket *b = get_bucket(prison, key);
- spin_lock_irqsave(&prison->lock, flags);
- r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_lock_irqsave(&b->lock, flags);
+ r = __bio_detain(b, key, inmate, cell_prealloc, cell_result);
+ spin_unlock_irqrestore(&b->lock, flags);
return r;
}
@@ -208,10 +222,11 @@ void dm_cell_release(struct dm_bio_prison *prison,
struct bio_list *bios)
{
unsigned long flags;
+ struct bucket *b = get_bucket(prison, &cell->key);
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irqsave(&b->lock, flags);
__cell_release(cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irqrestore(&b->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
@@ -230,28 +245,25 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
struct bio_list *inmates)
{
unsigned long flags;
+ struct bucket *b = get_bucket(prison, &cell->key);
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irqsave(&b->lock, flags);
__cell_release_no_holder(cell, inmates);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irqrestore(&b->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
void dm_cell_error(struct dm_bio_prison *prison,
- struct dm_bio_prison_cell *cell)
+ struct dm_bio_prison_cell *cell, int error)
{
struct bio_list bios;
struct bio *bio;
- unsigned long flags;
bio_list_init(&bios);
-
- spin_lock_irqsave(&prison->lock, flags);
- __cell_release(cell, &bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ dm_cell_release(prison, cell, &bios);
while ((bio = bio_list_pop(&bios)))
- bio_io_error(bio);
+ bio_endio(bio, error);
}
EXPORT_SYMBOL_GPL(dm_cell_error);
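
The structural change above replaces the prison-wide spinlock with one lock per hash bucket, so bios whose keys hash to different buckets no longer serialize against each other. The pattern, reduced to its essentials: hash the key once, then take only that bucket's lock. Note that the release paths re-derive the bucket from cell->key, which is why a cell's key must not change while the cell is held.

    static struct bucket *get_bucket(struct dm_bio_prison *prison,
                                     struct dm_cell_key *key)
    {
            return prison->buckets + hash_key(prison, key);
    }

    static int bio_detain(struct dm_bio_prison *prison,
                          struct dm_cell_key *key, struct bio *inmate,
                          struct dm_bio_prison_cell *cell_prealloc,
                          struct dm_bio_prison_cell **cell_result)
    {
            int r;
            unsigned long flags;
            struct bucket *b = get_bucket(prison, key);

            spin_lock_irqsave(&b->lock, flags); /* one bucket, not the world */
            r = __bio_detain(b, key, inmate, cell_prealloc, cell_result);
            spin_unlock_irqrestore(&b->lock, flags);

            return r;
    }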
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 3f833190eadf..6805a142b750 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -85,7 +85,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
- struct dm_bio_prison_cell *cell);
+ struct dm_bio_prison_cell *cell, int error);
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53b213226c01..4cba2d808afb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ * Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
* Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
* Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
@@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void)
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);
-MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 414dad4cb49b..ad913cd4aded 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1391,7 +1391,8 @@ static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
static void era_destroy(struct era *era)
{
- metadata_close(era->md);
+ if (era->md)
+ metadata_close(era->md);
if (era->wq)
destroy_workqueue(era->wq);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3842ac738f98..db404a0f7e2c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -10,6 +10,7 @@
#include <linux/device-mapper.h>
#include <linux/bio.h>
+#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -32,7 +33,7 @@ struct dm_io_client {
struct io {
unsigned long error_bits;
atomic_t count;
- struct task_struct *sleeper;
+ struct completion *wait;
struct dm_io_client *client;
io_notify_fn callback;
void *context;
@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
invalidate_kernel_vmap_range(io->vma_invalidate_address,
io->vma_invalidate_size);
- if (io->sleeper)
- wake_up_process(io->sleeper);
+ if (io->wait)
+ complete(io->wait);
else {
unsigned long r = io->error_bits;
@@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
*/
volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
+ DECLARE_COMPLETION_ONSTACK(wait);
if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
WARN_ON(1);
@@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
- io->sleeper = current;
+ io->wait = &wait;
io->client = client;
io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
dispatch_io(rw, num_regions, where, dp, io, 1);
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!atomic_read(&io->count))
- break;
-
- io_schedule();
- }
- set_current_state(TASK_RUNNING);
+ wait_for_completion_io(&wait);
if (error_bits)
*error_bits = io->error_bits;
@@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io = mempool_alloc(client->pool, GFP_NOIO);
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
- io->sleeper = NULL;
+ io->wait = NULL;
io->client = client;
io->callback = fn;
io->context = context;
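
The sleeper/wake_up_process pair removed above was a hand-rolled completion; the patch swaps in the real thing, and wait_for_completion_io() additionally lets the scheduler account the sleep as I/O wait. The shape of the conversion, as a standalone sketch with illustrative names:

    #include <linux/completion.h>

    struct io_ctx {
            struct completion *wait;        /* NULL for async submissions */
    };

    static void io_done(struct io_ctx *io)
    {
            if (io->wait)
                    complete(io->wait);     /* wake the synchronous submitter */
    }

    static void sync_submit(struct io_ctx *io)
    {
            DECLARE_COMPLETION_ONSTACK(wait);

            io->wait = &wait;
            /* ... dispatch the I/O; io_done() runs from the end_io path ... */
            wait_for_completion_io(&wait);  /* accounted as iowait */
    }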
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ebfa411d1a7d..f4167b013d99 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1242,17 +1242,8 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (noretry_error(error)) {
- if ((clone->cmd_flags & REQ_WRITE_SAME) &&
- !clone->q->limits.max_write_same_sectors) {
- struct queue_limits *limits;
-
- /* device doesn't really support WRITE SAME, disable it */
- limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
- limits->max_write_same_sectors = 0;
- }
+ if (noretry_error(error))
return error;
- }
if (mpio->pgpath)
fail_path(mpio->pgpath);
@@ -1620,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
- /* pg_init in progress, requeue until done */
- if (!pg_ready(m)) {
+ /* pg_init in progress or no paths available */
+ if (m->pg_init_in_progress ||
+ (!m->nr_valid_paths && m->queue_if_no_path)) {
busy = 1;
goto out;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 8e0caed0bf74..5bd2290cfb1e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2141,6 +2141,11 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
* Origin: maps a linear range of a device, with hooks for snapshotting.
*/
+struct dm_origin {
+ struct dm_dev *dev;
+ unsigned split_boundary;
+};
+
/*
* Construct an origin mapping: <dev_path>
* The context for an origin is merely a 'struct dm_dev *'
@@ -2149,41 +2154,65 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
- struct dm_dev *dev;
+ struct dm_origin *o;
if (argc != 1) {
ti->error = "origin: incorrect number of arguments";
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
+ o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
+ if (!o) {
+ ti->error = "Cannot allocate private origin structure";
+ r = -ENOMEM;
+ goto bad_alloc;
+ }
+
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
if (r) {
ti->error = "Cannot get target device";
- return r;
+ goto bad_open;
}
- ti->private = dev;
+ ti->private = o;
ti->num_flush_bios = 1;
return 0;
+
+bad_open:
+ kfree(o);
+bad_alloc:
+ return r;
}
static void origin_dtr(struct dm_target *ti)
{
- struct dm_dev *dev = ti->private;
- dm_put_device(ti, dev);
+ struct dm_origin *o = ti->private;
+ dm_put_device(ti, o->dev);
+ kfree(o);
}
static int origin_map(struct dm_target *ti, struct bio *bio)
{
- struct dm_dev *dev = ti->private;
- bio->bi_bdev = dev->bdev;
+ struct dm_origin *o = ti->private;
+ unsigned available_sectors;
- if (bio->bi_rw & REQ_FLUSH)
+ bio->bi_bdev = o->dev->bdev;
+
+ if (unlikely(bio->bi_rw & REQ_FLUSH))
return DM_MAPIO_REMAPPED;
+ if (bio_rw(bio) != WRITE)
+ return DM_MAPIO_REMAPPED;
+
+ available_sectors = o->split_boundary -
+ ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
+
+ if (bio_sectors(bio) > available_sectors)
+ dm_accept_partial_bio(bio, available_sectors);
+
/* Only tell snapshots if this is a write */
- return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
+ return do_origin(o->dev, bio);
}
/*
@@ -2192,15 +2221,15 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
*/
static void origin_resume(struct dm_target *ti)
{
- struct dm_dev *dev = ti->private;
+ struct dm_origin *o = ti->private;
- ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
+ o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
}
static void origin_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
- struct dm_dev *dev = ti->private;
+ struct dm_origin *o = ti->private;
switch (type) {
case STATUSTYPE_INFO:
@@ -2208,7 +2237,7 @@ static void origin_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- snprintf(result, maxlen, "%s", dev->name);
+ snprintf(result, maxlen, "%s", o->dev->name);
break;
}
}
@@ -2216,13 +2245,13 @@ static void origin_status(struct dm_target *ti, status_type_t type,
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
struct bio_vec *biovec, int max_size)
{
- struct dm_dev *dev = ti->private;
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct dm_origin *o = ti->private;
+ struct request_queue *q = bdev_get_queue(o->dev->bdev);
if (!q->merge_bvec_fn)
return max_size;
- bvm->bi_bdev = dev->bdev;
+ bvm->bi_bdev = o->dev->bdev;
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
@@ -2230,9 +2259,9 @@ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
static int origin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
- struct dm_dev *dev = ti->private;
+ struct dm_origin *o = ti->private;
- return fn(ti, dev, 0, ti->len, data);
+ return fn(ti, o->dev, 0, ti->len, data);
}
static struct target_type origin_target = {
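
The origin target can now accept just part of a write and let dm core resubmit the remainder, via dm_accept_partial_bio(). The split point is the next multiple of split_boundary, and since snapshot chunk sizes are validated to be powers of two, the distance can be computed with a mask: for split_boundary = 16 and bi_sector = 21, 16 - (21 & 15) = 11 sectors remain before the boundary.

    #include <linux/types.h>

    /*
     * Sectors left before the next chunk boundary; assumes boundary is a
     * power of two, as snapshot chunk sizes are.
     */
    static unsigned sectors_to_boundary(sector_t sector, unsigned boundary)
    {
            return boundary - ((unsigned)sector & (boundary - 1));
    }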
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 50601ec7017a..5f59f1e3e5b1 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -465,8 +465,8 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
}
EXPORT_SYMBOL(dm_get_device);
-int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
@@ -499,7 +499,6 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
(unsigned int) (PAGE_SIZE >> 9));
return 0;
}
-EXPORT_SYMBOL_GPL(dm_set_device_limits);
/*
* Decrement a device's use count and remove it if necessary.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 242ac2ea5f29..fc9c848a60c9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -310,13 +310,18 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,
wake_worker(pool);
}
-static void cell_error(struct pool *pool,
- struct dm_bio_prison_cell *cell)
+static void cell_error_with_code(struct pool *pool,
+ struct dm_bio_prison_cell *cell, int error_code)
{
- dm_cell_error(pool->prison, cell);
+ dm_cell_error(pool->prison, cell, error_code);
dm_bio_prison_free_cell(pool->prison, cell);
}
+static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
+{
+ cell_error_with_code(pool, cell, -EIO);
+}
+
/*----------------------------------------------------------------*/
/*
@@ -1027,7 +1032,7 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&tc->lock, flags);
}
-static bool should_error_unserviceable_bio(struct pool *pool)
+static int should_error_unserviceable_bio(struct pool *pool)
{
enum pool_mode m = get_pool_mode(pool);
@@ -1035,25 +1040,27 @@ static bool should_error_unserviceable_bio(struct pool *pool)
case PM_WRITE:
/* Shouldn't get here */
DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
- return true;
+ return -EIO;
case PM_OUT_OF_DATA_SPACE:
- return pool->pf.error_if_no_space;
+ return pool->pf.error_if_no_space ? -ENOSPC : 0;
case PM_READ_ONLY:
case PM_FAIL:
- return true;
+ return -EIO;
default:
/* Shouldn't get here */
DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
- return true;
+ return -EIO;
}
}
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
- if (should_error_unserviceable_bio(pool))
- bio_io_error(bio);
+ int error = should_error_unserviceable_bio(pool);
+
+ if (error)
+ bio_endio(bio, error);
else
retry_on_resume(bio);
}
@@ -1062,18 +1069,21 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
{
struct bio *bio;
struct bio_list bios;
+ int error;
- if (should_error_unserviceable_bio(pool)) {
- cell_error(pool, cell);
+ error = should_error_unserviceable_bio(pool);
+ if (error) {
+ cell_error_with_code(pool, cell, error);
return;
}
bio_list_init(&bios);
cell_release(pool, cell, &bios);
- if (should_error_unserviceable_bio(pool))
+ error = should_error_unserviceable_bio(pool);
+ if (error)
while ((bio = bio_list_pop(&bios)))
- bio_io_error(bio);
+ bio_endio(bio, error);
else
while ((bio = bio_list_pop(&bios)))
retry_on_resume(bio);
@@ -1610,47 +1620,63 @@ static void do_no_space_timeout(struct work_struct *ws)
/*----------------------------------------------------------------*/
-struct noflush_work {
+struct pool_work {
struct work_struct worker;
- struct thin_c *tc;
+ struct completion complete;
+};
+
+static struct pool_work *to_pool_work(struct work_struct *ws)
+{
+ return container_of(ws, struct pool_work, worker);
+}
- atomic_t complete;
- wait_queue_head_t wait;
+static void pool_work_complete(struct pool_work *pw)
+{
+ complete(&pw->complete);
+}
+
+static void pool_work_wait(struct pool_work *pw, struct pool *pool,
+ void (*fn)(struct work_struct *))
+{
+ INIT_WORK_ONSTACK(&pw->worker, fn);
+ init_completion(&pw->complete);
+ queue_work(pool->wq, &pw->worker);
+ wait_for_completion(&pw->complete);
+}
+
+/*----------------------------------------------------------------*/
+
+struct noflush_work {
+ struct pool_work pw;
+ struct thin_c *tc;
};
-static void complete_noflush_work(struct noflush_work *w)
+static struct noflush_work *to_noflush(struct work_struct *ws)
{
- atomic_set(&w->complete, 1);
- wake_up(&w->wait);
+ return container_of(to_pool_work(ws), struct noflush_work, pw);
}
static void do_noflush_start(struct work_struct *ws)
{
- struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ struct noflush_work *w = to_noflush(ws);
w->tc->requeue_mode = true;
requeue_io(w->tc);
- complete_noflush_work(w);
+ pool_work_complete(&w->pw);
}
static void do_noflush_stop(struct work_struct *ws)
{
- struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ struct noflush_work *w = to_noflush(ws);
w->tc->requeue_mode = false;
- complete_noflush_work(w);
+ pool_work_complete(&w->pw);
}
static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{
struct noflush_work w;
- INIT_WORK_ONSTACK(&w.worker, fn);
w.tc = tc;
- atomic_set(&w.complete, 0);
- init_waitqueue_head(&w.wait);
-
- queue_work(tc->pool->wq, &w.worker);
-
- wait_event(w.wait, atomic_read(&w.complete));
+ pool_work_wait(&w.pw, tc->pool, fn);
}
/*----------------------------------------------------------------*/
@@ -3068,7 +3094,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
*/
if (pt->adjusted_pf.discard_passdown) {
data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
- limits->discard_granularity = data_limits->discard_granularity;
+ limits->discard_granularity = max(data_limits->discard_granularity,
+ pool->sectors_per_block << SECTOR_SHIFT);
} else
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
}
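
The noflush rework above factors "run this function on the pool workqueue and wait for it" into a reusable pool_work base: the completion lives on the caller's stack (hence INIT_WORK_ONSTACK), and users embed pool_work in a larger struct recovered with container_of(). The embedding pattern in isolation:

    struct pool_work {
            struct work_struct worker;
            struct completion complete;
    };

    struct noflush_work {
            struct pool_work pw;
            struct thin_c *tc;
    };

    static struct noflush_work *to_noflush(struct work_struct *ws)
    {
            /* work_struct -> enclosing pool_work -> enclosing noflush_work */
            return container_of(container_of(ws, struct pool_work, worker),
                                struct noflush_work, pw);
    }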
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index c99003e0d47a..b9a64bbce304 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ * Copyright (C) 2003 Jana Saout <jana@saout.de>
*
* This file is released under the GPL.
*/
@@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)
module_init(dm_zero_init)
module_exit(dm_zero_exit)
-MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index aa9e093343d4..32b958dbc499 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
+static struct workqueue_struct *deferred_remove_workqueue;
+
/*
* For bio-based dm.
* One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
if (r)
goto out_free_rq_tio_cache;
+ deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+ if (!deferred_remove_workqueue) {
+ r = -ENOMEM;
+ goto out_uevent_exit;
+ }
+
_major = major;
r = register_blkdev(_major, _name);
if (r < 0)
- goto out_uevent_exit;
+ goto out_free_workqueue;
if (!_major)
_major = r;
return 0;
+out_free_workqueue:
+ destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
dm_uevent_exit();
out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
static void local_exit(void)
{
flush_scheduled_work();
+ destroy_workqueue(deferred_remove_workqueue);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
- schedule_work(&deferred_remove_work);
+ queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
@@ -755,6 +766,14 @@ static void dec_pending(struct dm_io *io, int error)
}
}
+static void disable_write_same(struct mapped_device *md)
+{
+ struct queue_limits *limits = dm_get_queue_limits(md);
+
+ /* device doesn't really support WRITE SAME, disable it */
+ limits->max_write_same_sectors = 0;
+}
+
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
@@ -783,6 +802,10 @@ static void clone_endio(struct bio *bio, int error)
}
}
+ if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+ !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
+ disable_write_same(md);
+
free_tio(md, tio);
dec_pending(io, error);
}
@@ -977,6 +1000,10 @@ static void dm_done(struct request *clone, int error, bool mapped)
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
+ if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
+ !clone->q->limits.max_write_same_sectors))
+ disable_write_same(tio->md);
+
if (r <= 0)
/* The target wants to complete the I/O */
dm_end_request(clone, r);
@@ -1110,6 +1137,46 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+/*
+ * A target may call dm_accept_partial_bio only from the map routine. It is
+ * allowed for all bio types except REQ_FLUSH.
+ *
+ * dm_accept_partial_bio informs dm core that the target only wants to
+ * process additional n_sectors sectors of the bio, and that the rest of
+ * the data should be sent in a subsequent bio.
+ *
+ * A diagram that explains the arithmetic:
+ * +--------------------+---------------+-------+
+ * | 1 | 2 | 3 |
+ * +--------------------+---------------+-------+
+ *
+ * <-------------- *tio->len_ptr --------------->
+ * <------- bi_size ------->
+ * <-- n_sectors -->
+ *
+ * Region 1 was already iterated over with bio_advance or a similar function.
+ * (it may be empty if the target doesn't use bio_advance)
+ * Region 2 is the remaining bio size that the target wants to process.
+ * (it may be empty if region 1 is non-empty, although there is no reason
+ * to make it empty)
+ * The target requires that region 3 is to be sent in the next bio.
+ *
+ * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
+ * the partially processed part (the sum of regions 1+2) must be the same for all
+ * copies of the bio.
+ */
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+ BUG_ON(bio->bi_rw & REQ_FLUSH);
+ BUG_ON(bi_size > *tio->len_ptr);
+ BUG_ON(n_sectors > bi_size);
+ *tio->len_ptr -= bi_size - n_sectors;
+ bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
static void __map_bio(struct dm_target_io *tio)
{
int r;
@@ -1152,10 +1219,10 @@ struct clone_info {
struct bio *bio;
struct dm_io *io;
sector_t sector;
- sector_t sector_count;
+ unsigned sector_count;
};
-static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
bio->bi_iter.bi_sector = sector;
bio->bi_iter.bi_size = to_bytes(len);
@@ -1200,11 +1267,13 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target *ti,
- unsigned target_bio_nr, sector_t len)
+ unsigned target_bio_nr, unsigned *len)
{
struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
struct bio *clone = &tio->clone;
+ tio->len_ptr = len;
+
/*
* Discard requests require the bio's inline iovecs be initialized.
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
@@ -1212,13 +1281,13 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
*/
__bio_clone_fast(clone, ci->bio);
if (len)
- bio_setup_sector(clone, ci->sector, len);
+ bio_setup_sector(clone, ci->sector, *len);
__map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios, sector_t len)
+ unsigned num_bios, unsigned *len)
{
unsigned target_bio_nr;
@@ -1233,13 +1302,13 @@ static int __send_empty_flush(struct clone_info *ci)
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
+ __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
return 0;
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned len)
+ sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
@@ -1254,7 +1323,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
tio = alloc_tio(ci, ti, 0, target_bio_nr);
- clone_bio(tio, bio, sector, len);
+ tio->len_ptr = len;
+ clone_bio(tio, bio, sector, *len);
__map_bio(tio);
}
}
@@ -1283,7 +1353,7 @@ static int __send_changing_extent_only(struct clone_info *ci,
is_split_required_fn is_split_required)
{
struct dm_target *ti;
- sector_t len;
+ unsigned len;
unsigned num_bios;
do {
@@ -1302,11 +1372,11 @@ static int __send_changing_extent_only(struct clone_info *ci,
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
- len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
else
- len = min(ci->sector_count, max_io_len(ci->sector, ti));
+ len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
- __send_duplicate_bios(ci, ti, num_bios, len);
+ __send_duplicate_bios(ci, ti, num_bios, &len);
ci->sector += len;
} while (ci->sector_count -= len);
@@ -1345,7 +1415,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- __clone_and_map_data_bio(ci, ti, ci->sector, len);
+ __clone_and_map_data_bio(ci, ti, ci->sector, &len);
ci->sector += len;
ci->sector_count -= len;
@@ -1439,7 +1509,6 @@ static int dm_merge_bvec(struct request_queue *q,
* just one page.
*/
else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
-
max_size = 0;
out:
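
The arithmetic in dm_accept_partial_bio() can be checked against its diagram: with *tio->len_ptr = 100 sectors in total, bi_size = 60 still to process and n_sectors = 25 accepted, the function shrinks *len_ptr to 100 - (60 - 25) = 65, i.e. region 1 (40 sectors already advanced past) plus region 2 (25 sectors accepted); region 3 (35 sectors) arrives in the next bio. On plain integers:

    /* Core of dm_accept_partial_bio()'s accounting (all units: sectors). */
    static unsigned accept_partial(unsigned *len_ptr, unsigned bi_size,
                                   unsigned n_sectors)
    {
            *len_ptr -= bi_size - n_sectors; /* push region 3 to the next bio */
            return n_sectors;                /* the bio's new size */
    }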
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2382cfc9bb3f..32fc19c540d4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3448,6 +3448,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->level = LEVEL_NONE;
return rv;
}
+ if (mddev->ro)
+ return -EROFS;
/* request to change the personality. Need to ensure:
* - array is not engaged in resync/recovery/reshape
@@ -3634,6 +3636,8 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
+ if (mddev->ro)
+ return -EROFS;
mddev->new_layout = n;
err = mddev->pers->check_reshape(mddev);
if (err) {
@@ -3723,6 +3727,8 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
+ if (mddev->ro)
+ return -EROFS;
mddev->new_chunk_sectors = n >> 9;
err = mddev->pers->check_reshape(mddev);
if (err) {
@@ -5593,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
- info.state = (1<<MD_SB_BITMAP_PRESENT);
+ info.state |= (1<<MD_SB_BITMAP_PRESENT);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
@@ -6135,6 +6141,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
*/
if (mddev->sync_thread)
return -EBUSY;
+ if (mddev->ro)
+ return -EROFS;
rdev_for_each(rdev, mddev) {
sector_t avail = rdev->sectors;
@@ -6157,6 +6165,8 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
+ if (mddev->ro)
+ return -EROFS;
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
@@ -7491,6 +7501,19 @@ void md_do_sync(struct md_thread *thread)
rdev->recovery_offset < j)
j = rdev->recovery_offset;
rcu_read_unlock();
+
+ /* If there is a bitmap, we need to make sure all
+ * writes that started before we added a spare
+ * complete before we start doing a recovery.
+ * Otherwise the write might complete and (via
+ * bitmap_endwrite) set a bit in the bitmap after the
+ * recovery has checked that bit and skipped that
+ * region.
+ */
+ if (mddev->bitmap) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -8333,7 +8356,7 @@ static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
if (a < s) {
/* we need to split this range */
if (bb->count >= MD_MAX_BADBLOCKS) {
- rv = 0;
+ rv = -ENOSPC;
goto out;
}
memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
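
The get_array_info() change a few hunks up is a one-character fix with user-visible effect: plain '=' clobbered the MD_SB_CLEAN bit set just before it, so a clean array that also had a bitmap was reported as dirty. Demonstrated with the actual bit numbers (MD_SB_CLEAN is bit 0, MD_SB_BITMAP_PRESENT bit 8 in linux/raid/md_p.h):

    static unsigned int array_state(int clean, int bitmap_present)
    {
            unsigned int state = 0;

            if (clean)
                    state = (1 << 0);   /* MD_SB_CLEAN */
            if (bitmap_present)
                    state |= (1 << 8);  /* MD_SB_BITMAP_PRESENT; '=' was the bug */

            return state;
    }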
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2afef4ec9312..6234b2e84587 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -292,9 +292,12 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state) &&
- !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
list_add_tail(&sh->lru, &conf->delayed_list);
- else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ if (atomic_read(&conf->preread_active_stripes)
+ < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
else {
@@ -413,6 +416,11 @@ static void release_stripe(struct stripe_head *sh)
int hash;
bool wakeup;
+ /* Avoid release_list until the last reference.
+ */
+ if (atomic_add_unless(&sh->count, -1, 1))
+ return;
+
if (unlikely(!conf->mddev->thread) ||
test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
goto slow_path;
@@ -479,6 +487,7 @@ static void shrink_buffers(struct stripe_head *sh)
int num = sh->raid_conf->pool_size;
for (i = 0; i < num ; i++) {
+ WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
p = sh->dev[i].page;
if (!p)
continue;
@@ -499,6 +508,7 @@ static int grow_buffers(struct stripe_head *sh)
return 1;
}
sh->dev[i].page = page;
+ sh->dev[i].orig_page = page;
}
return 0;
}
@@ -855,6 +865,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
+ if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
+ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+ sh->dev[i].vec.bv_page = sh->dev[i].page;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
@@ -899,6 +912,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
else
rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->data_offset);
+ if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
+ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+ sh->dev[i].rvec.bv_page = sh->dev[i].page;
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
@@ -927,8 +943,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
}
static struct dma_async_tx_descriptor *
-async_copy_data(int frombio, struct bio *bio, struct page *page,
- sector_t sector, struct dma_async_tx_descriptor *tx)
+async_copy_data(int frombio, struct bio *bio, struct page **page,
+ sector_t sector, struct dma_async_tx_descriptor *tx,
+ struct stripe_head *sh)
{
struct bio_vec bvl;
struct bvec_iter iter;
@@ -965,11 +982,16 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
if (clen > 0) {
b_offset += bvl.bv_offset;
bio_page = bvl.bv_page;
- if (frombio)
- tx = async_memcpy(page, bio_page, page_offset,
+ if (frombio) {
+ if (sh->raid_conf->skip_copy &&
+ b_offset == 0 && page_offset == 0 &&
+ clen == STRIPE_SIZE)
+ *page = bio_page;
+ else
+ tx = async_memcpy(*page, bio_page, page_offset,
b_offset, clen, &submit);
- else
- tx = async_memcpy(bio_page, page, b_offset,
+ } else
+ tx = async_memcpy(bio_page, *page, b_offset,
page_offset, clen, &submit);
}
/* chain the operations */
@@ -1045,8 +1067,8 @@ static void ops_run_biofill(struct stripe_head *sh)
spin_unlock_irq(&sh->stripe_lock);
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
- tx = async_copy_data(0, rbi, dev->page,
- dev->sector, tx);
+ tx = async_copy_data(0, rbi, &dev->page,
+ dev->sector, tx, sh);
rbi = r5_next_bio(rbi, dev->sector);
}
}
@@ -1384,6 +1406,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
BUG_ON(dev->written);
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
+ WARN_ON(dev->page != dev->orig_page);
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
@@ -1393,9 +1416,15 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
set_bit(R5_SyncIO, &dev->flags);
if (wbi->bi_rw & REQ_DISCARD)
set_bit(R5_Discard, &dev->flags);
- else
- tx = async_copy_data(1, wbi, dev->page,
- dev->sector, tx);
+ else {
+ tx = async_copy_data(1, wbi, &dev->page,
+ dev->sector, tx, sh);
+ if (dev->page != dev->orig_page) {
+ set_bit(R5_SkipCopy, &dev->flags);
+ clear_bit(R5_UPTODATE, &dev->flags);
+ clear_bit(R5_OVERWRITE, &dev->flags);
+ }
+ }
wbi = r5_next_bio(wbi, dev->sector);
}
}
@@ -1426,7 +1455,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
struct r5dev *dev = &sh->dev[i];
if (dev->written || i == pd_idx || i == qd_idx) {
- if (!discard)
+ if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
set_bit(R5_UPTODATE, &dev->flags);
if (fua)
set_bit(R5_WantFUA, &dev->flags);
@@ -1839,8 +1868,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
osh = get_free_stripe(conf, hash);
unlock_device_hash_lock(conf, hash);
atomic_set(&nsh->count, 1);
- for(i=0; i<conf->pool_size; i++)
+ for(i=0; i<conf->pool_size; i++) {
nsh->dev[i].page = osh->dev[i].page;
+ nsh->dev[i].orig_page = osh->dev[i].page;
+ }
for( ; i<newsize; i++)
nsh->dev[i].page = NULL;
nsh->hash_lock_index = hash;
@@ -1896,6 +1927,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (nsh->dev[i].page == NULL) {
struct page *p = alloc_page(GFP_NOIO);
nsh->dev[i].page = p;
+ nsh->dev[i].orig_page = p;
if (!p)
err = -ENOMEM;
}
@@ -2133,24 +2165,20 @@ static void raid5_end_write_request(struct bio *bi, int error)
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
-
+
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
bio_init(&dev->req);
dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_vcnt++;
- dev->req.bi_max_vecs++;
+ dev->req.bi_max_vecs = 1;
dev->req.bi_private = sh;
- dev->vec.bv_page = dev->page;
bio_init(&dev->rreq);
dev->rreq.bi_io_vec = &dev->rvec;
- dev->rreq.bi_vcnt++;
- dev->rreq.bi_max_vecs++;
+ dev->rreq.bi_max_vecs = 1;
dev->rreq.bi_private = sh;
- dev->rvec.bv_page = dev->page;
dev->flags = 0;
dev->sector = compute_blocknr(sh, i, previous);
@@ -2750,6 +2778,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
/* and fail all 'written' */
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
+ if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
+ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+ sh->dev[i].page = sh->dev[i].orig_page;
+ }
+
if (bi) bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
@@ -2886,8 +2919,11 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
(s->failed >= 1 && fdev[0]->toread) ||
(s->failed >= 2 && fdev[1]->toread) ||
(sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
+ (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
!test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
- (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
+ (sh->raid_conf->level == 6 && s->failed && s->to_write &&
+ s->to_write < sh->raid_conf->raid_disks - 2 &&
+ (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
/* we would like to get this block, possibly by computing it,
* otherwise read it if the backing disk is insync
*/
@@ -2991,12 +3027,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) &&
(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Discard, &dev->flags))) {
+ test_bit(R5_Discard, &dev->flags) ||
+ test_bit(R5_SkipCopy, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
pr_debug("Return write for disc %d\n", i);
if (test_and_clear_bit(R5_Discard, &dev->flags))
clear_bit(R5_UPTODATE, &dev->flags);
+ if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
+ WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
+ dev->page = dev->orig_page;
+ }
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_iter.bi_sector <
@@ -3015,6 +3056,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
0);
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
+ WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
+ WARN_ON(dev->page != dev->orig_page);
}
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
@@ -3086,7 +3129,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
- if (test_bit(R5_Insync, &dev->flags)) rcw++;
+ if (test_bit(R5_Insync, &dev->flags))
+ rcw++;
else
rcw += 2*disks;
}
@@ -3107,10 +3151,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
- if (
- test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- pr_debug("Read_old block "
- "%d for r-m-w\n", i);
+ if (test_bit(STRIPE_PREREAD_ACTIVE,
+ &sh->state)) {
+ pr_debug("Read_old block %d for r-m-w\n",
+ i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
@@ -3133,10 +3177,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
- if (!test_bit(R5_Insync, &dev->flags))
- continue; /* it's a failed drive */
- if (
- test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ if (test_bit(R5_Insync, &dev->flags) &&
+ test_bit(STRIPE_PREREAD_ACTIVE,
+ &sh->state)) {
pr_debug("Read_old block "
"%d for Reconstruct\n", i);
set_bit(R5_LOCKED, &dev->flags);
@@ -5031,8 +5074,8 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
- handle_stripe(sh);
release_stripe(sh);
return STRIPE_SECTORS;
@@ -5072,7 +5115,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
/* already done this stripe */
continue;
- sh = get_active_stripe(conf, sector, 0, 1, 0);
+ sh = get_active_stripe(conf, sector, 0, 1, 1);
if (!sh) {
/* failed to get a stripe - must wait */
@@ -5355,6 +5398,50 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
raid5_store_preread_threshold);
static ssize_t
+raid5_show_skip_copy(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->skip_copy);
+ else
+ return 0;
+}
+
+static ssize_t
+raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+ new = !!new;
+ if (new == conf->skip_copy)
+ return len;
+
+ mddev_suspend(mddev);
+ conf->skip_copy = new;
+ if (new)
+ mddev->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+ else
+ mddev->queue->backing_dev_info.capabilities &=
+ ~BDI_CAP_STABLE_WRITES;
+ mddev_resume(mddev);
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
+ raid5_show_skip_copy,
+ raid5_store_skip_copy);
+
+static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
struct r5conf *conf = mddev->private;
@@ -5439,6 +5526,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_stripecache_active.attr,
&raid5_preread_bypass_threshold.attr,
&raid5_group_thread_cnt.attr,
+ &raid5_skip_copy.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
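The new skip_copy attribute shows up alongside the other raid5 knobs in the array's md sysfs directory. A minimal user-space sketch for flipping it (the /sys/block/md0 path is an assumption; substitute the real array name):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical array name: skip_copy sits next to the other
		 * raid5 attributes such as preread_bypass_threshold. */
		int fd = open("/sys/block/md0/md/skip_copy", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)	/* writing "0" re-enables copying */
			perror("write");
		close(fd);
		return 0;
	}

Because the store handler suspends and resumes the array around the flag flip, the write may block briefly while in-flight stripes drain.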
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 01ad8ae8f578..bc72cd4be5f8 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -232,7 +232,7 @@ struct stripe_head {
*/
struct bio req, rreq;
struct bio_vec vec, rvec;
- struct page *page;
+ struct page *page, *orig_page;
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
unsigned long flags;
@@ -299,6 +299,7 @@ enum r5dev_flags {
* data in, and now is a good time to write it out.
*/
R5_Discard, /* Discard the stripe */
+ R5_SkipCopy, /* Don't copy data from bio to stripe cache */
};
/*
@@ -436,6 +437,7 @@ struct r5conf {
atomic_t pending_full_writes; /* full write backlog */
int bypass_count; /* bypassed prereads */
int bypass_threshold; /* preread nice */
+ int skip_copy; /* Don't copy data from bio to stripe cache */
struct list_head *last_hold; /* detect hold_list promotions */
atomic_t reshape_stripes; /* stripes with pending writes for reshape */
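Taken together, orig_page and R5_SkipCopy let a full-page write lend its bio page to the stripe instead of being memcpy'd into the stripe cache. A conceptual sketch, not the kernel code (write_covers_whole_page() is an invented placeholder and all locking is elided):

	/* Conceptual sketch only. */
	if (conf->skip_copy && write_covers_whole_page(bio)) {
		dev->orig_page = dev->page;	/* remember the cache page */
		dev->page = bio_page(bio);	/* borrow the caller's page */
		set_bit(R5_SkipCopy, &dev->flags);
	}
	/*
	 * Once the write completes, handle_stripe_clean_event() (above)
	 * restores dev->page = dev->orig_page before the bio is returned.
	 * The caller's pages must therefore stay stable while borrowed,
	 * which is why the store handler toggles BDI_CAP_STABLE_WRITES.
	 */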
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 20f1655e6d75..8108c698b548 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -93,7 +93,9 @@ config VIDEO_M32R_AR_M64278
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
- depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
+ select ARM_DMA_USE_IOMMU
+ select OMAP_IOMMU
---help---
Driver for an OMAP 3 camera controller.
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
index e8847e79e31a..254975a9174e 100644
--- a/drivers/media/platform/omap3isp/Makefile
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -3,7 +3,7 @@
ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
omap3-isp-objs += \
- isp.o ispqueue.o ispvideo.o \
+ isp.o ispvideo.o \
ispcsiphy.o ispccp2.o ispcsi2.o \
ispccdc.o isppreview.o ispresizer.o \
ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 06a0df434249..2c7aa6720569 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -69,6 +69,8 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <asm/dma-iommu.h>
+
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -1397,14 +1399,14 @@ int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
if (isp_pipeline_is_last(me)) {
struct isp_video *video = pipe->output;
unsigned long flags;
- spin_lock_irqsave(&video->queue->irqlock, flags);
+ spin_lock_irqsave(&video->irqlock, flags);
if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
- spin_unlock_irqrestore(&video->queue->irqlock, flags);
+ spin_unlock_irqrestore(&video->irqlock, flags);
atomic_set(stopping, 0);
smp_mb();
return 0;
}
- spin_unlock_irqrestore(&video->queue->irqlock, flags);
+ spin_unlock_irqrestore(&video->irqlock, flags);
if (!wait_event_timeout(*wait, !atomic_read(stopping),
msecs_to_jiffies(1000))) {
atomic_set(stopping, 0);
@@ -1625,7 +1627,7 @@ struct isp_device *omap3isp_get(struct isp_device *isp)
* Decrement the reference count on the ISP. If the last reference is released,
* power-down all submodules, disable clocks and free temporary buffers.
*/
-void omap3isp_put(struct isp_device *isp)
+static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
{
if (isp == NULL)
return;
@@ -1634,7 +1636,7 @@ void omap3isp_put(struct isp_device *isp)
BUG_ON(isp->ref_count == 0);
if (--isp->ref_count == 0) {
isp_disable_interrupts(isp);
- if (isp->domain) {
+ if (save_ctx) {
isp_save_ctx(isp);
isp->has_context = 1;
}
@@ -1648,6 +1650,11 @@ void omap3isp_put(struct isp_device *isp)
mutex_unlock(&isp->isp_mutex);
}
+void omap3isp_put(struct isp_device *isp)
+{
+ __omap3isp_put(isp, true);
+}
+
/* --------------------------------------------------------------------------
* Platform device driver
*/
@@ -2120,6 +2127,61 @@ error_csiphy:
return ret;
}
+static void isp_detach_iommu(struct isp_device *isp)
+{
+ arm_iommu_release_mapping(isp->mapping);
+ isp->mapping = NULL;
+ iommu_group_remove_device(isp->dev);
+}
+
+static int isp_attach_iommu(struct isp_device *isp)
+{
+ struct dma_iommu_mapping *mapping;
+ struct iommu_group *group;
+ int ret;
+
+ /* Create a device group and add the device to it. */
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(isp->dev, "failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+
+ ret = iommu_group_add_device(group, isp->dev);
+ iommu_group_put(group);
+
+ if (ret < 0) {
+ dev_err(isp->dev, "failed to add device to IPMMU group\n");
+ return ret;
+ }
+
+ /*
+ * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+ * VAs. This will allocate a corresponding IOMMU domain.
+ */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
+ if (IS_ERR(mapping)) {
+ dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
+ ret = PTR_ERR(mapping);
+ goto error;
+ }
+
+ isp->mapping = mapping;
+
+ /* Attach the ARM VA mapping to the device. */
+ ret = arm_iommu_attach_device(isp->dev, mapping);
+ if (ret < 0) {
+ dev_err(isp->dev, "failed to attach device to VA mapping\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ isp_detach_iommu(isp);
+ return ret;
+}
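isp_attach_iommu() and isp_detach_iommu() wrap the standard ARM DMA-IOMMU lifecycle; the SZ_1G base and SZ_2G size simply define the IOVA window the mapping hands out. Stripped of the driver specifics, the pattern is (a sketch):

	struct dma_iommu_mapping *m;

	m = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
	if (IS_ERR(m))
		return PTR_ERR(m);	/* base 1 GiB, 2 GiB window */

	/* Route the device's dma_map_* / dma_alloc_* through the IOMMU. */
	if (arm_iommu_attach_device(dev, m) < 0) {
		arm_iommu_release_mapping(m);
		return -ENODEV;
	}

	/* ... dma_alloc_coherent() and dma_map_sg() now return IOVAs ... */

	arm_iommu_detach_device(dev);	/* teardown mirrors setup */
	arm_iommu_release_mapping(m);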
+
/*
* isp_remove - Remove ISP platform device
* @pdev: Pointer to ISP platform device
@@ -2135,10 +2197,8 @@ static int isp_remove(struct platform_device *pdev)
isp_xclk_cleanup(isp);
__omap3isp_get(isp, false);
- iommu_detach_device(isp->domain, &pdev->dev);
- iommu_domain_free(isp->domain);
- isp->domain = NULL;
- omap3isp_put(isp);
+ isp_detach_iommu(isp);
+ __omap3isp_put(isp, false);
return 0;
}
@@ -2265,39 +2325,32 @@ static int isp_probe(struct platform_device *pdev)
}
}
- isp->domain = iommu_domain_alloc(pdev->dev.bus);
- if (!isp->domain) {
- dev_err(isp->dev, "can't alloc iommu domain\n");
- ret = -ENOMEM;
+ /* IOMMU */
+ ret = isp_attach_iommu(isp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to attach to IOMMU\n");
goto error_isp;
}
- ret = iommu_attach_device(isp->domain, &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
- ret = -EPROBE_DEFER;
- goto free_domain;
- }
-
/* Interrupt */
isp->irq_num = platform_get_irq(pdev, 0);
if (isp->irq_num <= 0) {
dev_err(isp->dev, "No IRQ resource\n");
ret = -ENODEV;
- goto detach_dev;
+ goto error_iommu;
}
if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
"OMAP3 ISP", isp)) {
dev_err(isp->dev, "Unable to request IRQ\n");
ret = -EINVAL;
- goto detach_dev;
+ goto error_iommu;
}
/* Entities */
ret = isp_initialize_modules(isp);
if (ret < 0)
- goto detach_dev;
+ goto error_iommu;
ret = isp_register_entities(isp);
if (ret < 0)
@@ -2310,14 +2363,11 @@ static int isp_probe(struct platform_device *pdev)
error_modules:
isp_cleanup_modules(isp);
-detach_dev:
- iommu_detach_device(isp->domain, &pdev->dev);
-free_domain:
- iommu_domain_free(isp->domain);
- isp->domain = NULL;
+error_iommu:
+ isp_detach_iommu(isp);
error_isp:
isp_xclk_cleanup(isp);
- omap3isp_put(isp);
+ __omap3isp_put(isp, false);
error:
mutex_destroy(&isp->isp_mutex);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 6d5e69711907..2c314eea1252 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -45,8 +45,6 @@
#include "ispcsi2.h"
#include "ispccp2.h"
-#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
-
#define ISP_TOK_TERM 0xFFFFFFFF /*
* terminating token for ISP
* modules reg list
@@ -152,6 +150,7 @@ struct isp_xclk {
* regions.
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions.
+ * @mapping: IOMMU mapping
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
* @stop_failure: Indicates that an entity failed to stop.
@@ -171,7 +170,6 @@ struct isp_xclk {
* @isp_res: Pointer to current settings for ISP Resizer.
* @isp_prev: Pointer to current settings for ISP Preview.
* @isp_ccdc: Pointer to current settings for ISP CCDC.
- * @iommu: Pointer to requested IOMMU instance for ISP.
* @platform_cb: ISP driver callback function pointers for platform code
*
* This structure is used to store the OMAP ISP Information.
@@ -189,6 +187,8 @@ struct isp_device {
void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
+ struct dma_iommu_mapping *mapping;
+
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
@@ -219,8 +219,6 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
-
- struct iommu_domain *domain;
};
#define v4l2_dev_to_isp_device(dev) \
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 4d920c800ff5..9f727d20f06d 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -30,7 +30,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
-#include <linux/omap-iommu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
@@ -206,7 +205,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
* ccdc_lsc_program_table - Program Lens Shading Compensation table address.
* @ccdc: Pointer to ISP CCDC device.
*/
-static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr)
+static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
+ dma_addr_t addr)
{
isp_reg_writel(to_isp_device(ccdc), addr,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
@@ -333,7 +333,7 @@ static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
return -EBUSY;
ccdc_lsc_setup_regs(ccdc, &req->config);
- ccdc_lsc_program_table(ccdc, req->table);
+ ccdc_lsc_program_table(ccdc, req->table.dma);
return 0;
}
@@ -368,11 +368,12 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
if (req == NULL)
return;
- if (req->iovm)
- dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
- req->iovm->sgt->nents, DMA_TO_DEVICE);
- if (req->table)
- omap_iommu_vfree(isp->domain, isp->dev, req->table);
+ if (req->table.addr) {
+ sg_free_table(&req->table.sgt);
+ dma_free_coherent(isp->dev, req->config.size, req->table.addr,
+ req->table.dma);
+ }
+
kfree(req);
}
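The free path above is the mirror image of the allocation done in ccdc_lsc_config() below: one coherent buffer plus an sg_table describing it, so the table can still be handed to dma_sync_sg_for_{cpu,device}(). The pairing in isolation (a sketch with invented names):

	void *cpu;
	dma_addr_t dma;
	struct sg_table sgt;

	cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* Describe the coherent buffer as a scatterlist for the sync calls. */
	if (dma_get_sgtable(dev, &sgt, cpu, dma, size) < 0) {
		dma_free_coherent(dev, size, cpu, dma);
		return -ENOMEM;
	}

	/* ... dma_sync_sg_for_cpu(dev, sgt.sgl, sgt.nents, DMA_TO_DEVICE); ... */

	sg_free_table(&sgt);
	dma_free_coherent(dev, size, cpu, dma);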
@@ -416,7 +417,6 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
struct isp_device *isp = to_isp_device(ccdc);
struct ispccdc_lsc_config_req *req;
unsigned long flags;
- void *table;
u16 update;
int ret;
@@ -444,38 +444,31 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1;
- req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
- req->config.size, IOMMU_FLAG);
- if (IS_ERR_VALUE(req->table)) {
- req->table = 0;
- ret = -ENOMEM;
- goto done;
- }
-
- req->iovm = omap_find_iovm_area(isp->dev, req->table);
- if (req->iovm == NULL) {
+ req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
+ &req->table.dma,
+ GFP_KERNEL);
+ if (req->table.addr == NULL) {
ret = -ENOMEM;
goto done;
}
- if (!dma_map_sg(isp->dev, req->iovm->sgt->sgl,
- req->iovm->sgt->nents, DMA_TO_DEVICE)) {
- ret = -ENOMEM;
- req->iovm = NULL;
+ ret = dma_get_sgtable(isp->dev, &req->table.sgt,
+ req->table.addr, req->table.dma,
+ req->config.size);
+ if (ret < 0)
goto done;
- }
- dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
- req->iovm->sgt->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
+ req->table.sgt.nents, DMA_TO_DEVICE);
- table = omap_da_to_va(isp->dev, req->table);
- if (copy_from_user(table, config->lsc, req->config.size)) {
+ if (copy_from_user(req->table.addr, config->lsc,
+ req->config.size)) {
ret = -EFAULT;
goto done;
}
- dma_sync_sg_for_device(isp->dev, req->iovm->sgt->sgl,
- req->iovm->sgt->nents, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
+ req->table.sgt.nents, DMA_TO_DEVICE);
}
spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
@@ -584,7 +577,7 @@ static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
if (!ccdc->fpc_en)
return;
- isp_reg_writel(isp, ccdc->fpc.fpcaddr, OMAP3_ISP_IOMEM_CCDC,
+ isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
ISPCCDC_FPC_ADDR);
/* The FPNUM field must be set before enabling FPC. */
isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
@@ -724,8 +717,9 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc->shadow_update = 0;
if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
- u32 table_old = 0;
- u32 table_new;
+ struct omap3isp_ccdc_fpc fpc;
+ struct ispccdc_fpc fpc_old = { .addr = NULL, };
+ struct ispccdc_fpc fpc_new;
u32 size;
if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
@@ -734,35 +728,39 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
if (ccdc->fpc_en) {
- if (copy_from_user(&ccdc->fpc, ccdc_struct->fpc,
- sizeof(ccdc->fpc)))
+ if (copy_from_user(&fpc, ccdc_struct->fpc, sizeof(fpc)))
return -EFAULT;
+ size = fpc.fpnum * 4;
+
/*
- * table_new must be 64-bytes aligned, but it's
- * already done by omap_iommu_vmalloc().
+ * The table address must be 64-bytes aligned, which is
+ * guaranteed by dma_alloc_coherent().
*/
- size = ccdc->fpc.fpnum * 4;
- table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
- 0, size, IOMMU_FLAG);
- if (IS_ERR_VALUE(table_new))
+ fpc_new.fpnum = fpc.fpnum;
+ fpc_new.addr = dma_alloc_coherent(isp->dev, size,
+ &fpc_new.dma,
+ GFP_KERNEL);
+ if (fpc_new.addr == NULL)
return -ENOMEM;
- if (copy_from_user(omap_da_to_va(isp->dev, table_new),
- (__force void __user *)
- ccdc->fpc.fpcaddr, size)) {
- omap_iommu_vfree(isp->domain, isp->dev,
- table_new);
+ if (copy_from_user(fpc_new.addr,
+ (__force void __user *)fpc.fpcaddr,
+ size)) {
+ dma_free_coherent(isp->dev, size, fpc_new.addr,
+ fpc_new.dma);
return -EFAULT;
}
- table_old = ccdc->fpc.fpcaddr;
- ccdc->fpc.fpcaddr = table_new;
+ fpc_old = ccdc->fpc;
+ ccdc->fpc = fpc_new;
}
ccdc_configure_fpc(ccdc);
- if (table_old != 0)
- omap_iommu_vfree(isp->domain, isp->dev, table_old);
+
+ if (fpc_old.addr != NULL)
+ dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
+ fpc_old.addr, fpc_old.dma);
}
return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -1523,7 +1521,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
buffer = omap3isp_video_buffer_next(&ccdc->video_out);
if (buffer != NULL) {
- ccdc_set_outaddr(ccdc, buffer->isp_addr);
+ ccdc_set_outaddr(ccdc, buffer->dma);
restart = 1;
}
@@ -1662,7 +1660,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
return -ENODEV;
- ccdc_set_outaddr(ccdc, buffer->isp_addr);
+ ccdc_set_outaddr(ccdc, buffer->dma);
/* We now have a buffer queued on the output, restart the pipeline
* on the next CCDC interrupt if running in continuous mode (or when
@@ -2580,8 +2578,9 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
cancel_work_sync(&ccdc->lsc.table_work);
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
- if (ccdc->fpc.fpcaddr != 0)
- omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
+ if (ccdc->fpc.addr != NULL)
+ dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
+ ccdc->fpc.dma);
mutex_destroy(&ccdc->ioctl_lock);
}
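The FPC update in ccdc_config() above follows a copy-then-swap discipline so a failed copy_from_user() never clobbers the table the hardware is using. Reduced to its skeleton (a sketch; user_ptr and size stand in for the real fields):

	struct ispccdc_fpc fpc_new, fpc_old;

	fpc_new.fpnum = fpc.fpnum;
	fpc_new.addr = dma_alloc_coherent(isp->dev, size, &fpc_new.dma,
					  GFP_KERNEL);
	if (fpc_new.addr == NULL)
		return -ENOMEM;

	if (copy_from_user(fpc_new.addr, user_ptr, size)) {
		dma_free_coherent(isp->dev, size, fpc_new.addr, fpc_new.dma);
		return -EFAULT;		/* the old table is untouched */
	}

	fpc_old = ccdc->fpc;
	ccdc->fpc = fpc_new;		/* publish the new table */
	ccdc_configure_fpc(ccdc);	/* point the hardware at it */

	if (fpc_old.addr != NULL)	/* only then free the previous one */
		dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
				  fpc_old.addr, fpc_old.dma);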
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
index 9d24e4107864..f65061602c71 100644
--- a/drivers/media/platform/omap3isp/ispccdc.h
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -46,6 +46,12 @@ enum ccdc_input_entity {
#define OMAP3ISP_CCDC_NEVENTS 16
+struct ispccdc_fpc {
+ void *addr;
+ dma_addr_t dma;
+ unsigned int fpnum;
+};
+
enum ispccdc_lsc_state {
LSC_STATE_STOPPED = 0,
LSC_STATE_STOPPING = 1,
@@ -57,8 +63,12 @@ struct ispccdc_lsc_config_req {
struct list_head list;
struct omap3isp_ccdc_lsc_config config;
unsigned char enable;
- u32 table;
- struct iovm_struct *iovm;
+
+ struct {
+ void *addr;
+ dma_addr_t dma;
+ struct sg_table sgt;
+ } table;
};
/*
@@ -136,7 +146,7 @@ struct isp_ccdc_device {
fpc_en:1;
struct omap3isp_ccdc_blcomp blcomp;
struct omap3isp_ccdc_bclamp clamp;
- struct omap3isp_ccdc_fpc fpc;
+ struct ispccdc_fpc fpc;
struct ispccdc_lsc lsc;
unsigned int update;
unsigned int shadow_update;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index b30b67d22a58..f3801db9095c 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -549,7 +549,7 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
buffer = omap3isp_video_buffer_next(&ccp2->video_in);
if (buffer != NULL)
- ccp2_set_inaddr(ccp2, buffer->isp_addr);
+ ccp2_set_inaddr(ccp2, buffer->dma);
pipe->state |= ISP_PIPELINE_IDLE_INPUT;
@@ -940,7 +940,7 @@ static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
{
struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
- ccp2_set_inaddr(ccp2, buffer->isp_addr);
+ ccp2_set_inaddr(ccp2, buffer->dma);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 620560828a48..5a2e47e58b84 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -695,7 +695,7 @@ static void csi2_isr_buffer(struct isp_csi2_device *csi2)
if (buffer == NULL)
return;
- csi2_set_outaddr(csi2, buffer->isp_addr);
+ csi2_set_outaddr(csi2, buffer->dma);
csi2_ctx_enable(isp, csi2, 0, 1);
}
@@ -812,7 +812,7 @@ static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
struct isp_device *isp = video->isp;
struct isp_csi2_device *csi2 = &isp->isp_csi2a;
- csi2_set_outaddr(csi2, buffer->isp_addr);
+ csi2_set_outaddr(csi2, buffer->dma);
/*
* If streaming was enabled before there was a buffer queued
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index 75fd82b152ba..d6811ce263eb 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -47,7 +47,7 @@ static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
if (aewb->state == ISPSTAT_DISABLED)
return;
- isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr,
+ isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
if (!aewb->update)
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index a0bf5af32438..6fc960cd30f5 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -51,7 +51,7 @@ static void h3a_af_setup_regs(struct ispstat *af, void *priv)
if (af->state == ISPSTAT_DISABLED)
return;
- isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A,
+ isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A,
ISPH3A_AFBUFST);
if (!af->update)
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 395b2b068c75..720809b07e75 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1499,14 +1499,14 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
if (prev->input == PREVIEW_INPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&prev->video_in);
if (buffer != NULL)
- preview_set_inaddr(prev, buffer->isp_addr);
+ preview_set_inaddr(prev, buffer->dma);
pipe->state |= ISP_PIPELINE_IDLE_INPUT;
}
if (prev->output & PREVIEW_OUTPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&prev->video_out);
if (buffer != NULL) {
- preview_set_outaddr(prev, buffer->isp_addr);
+ preview_set_outaddr(prev, buffer->dma);
restart = 1;
}
pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
@@ -1577,10 +1577,10 @@ static int preview_video_queue(struct isp_video *video,
struct isp_prev_device *prev = &video->isp->isp_prev;
if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- preview_set_inaddr(prev, buffer->isp_addr);
+ preview_set_inaddr(prev, buffer->dma);
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- preview_set_outaddr(prev, buffer->isp_addr);
+ preview_set_outaddr(prev, buffer->dma);
return 0;
}
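Every buffer->isp_addr user in these files becomes buffer->dma because the series replaces the custom queue (deleted below) with videobuf2; with the vb2-dma-contig allocator the DMA address is supplied by the framework. A sketch of where the field would be filled in, assuming the vb2 conventions of this kernel (the struct layout and helper usage are not shown in this diff):

	struct isp_buffer {
		struct vb2_buffer vb;		/* must come first */
		struct list_head irqlist;
		dma_addr_t dma;			/* replaces the old isp_addr */
	};

	static int isp_video_buffer_prepare(struct vb2_buffer *buf)
	{
		struct isp_buffer *buffer =
			container_of(buf, struct isp_buffer, vb);

		buffer->dma = vb2_dma_contig_plane_dma_addr(buf, 0);
		return 0;
	}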
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
deleted file mode 100644
index a5e65858e799..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ /dev/null
@@ -1,1161 +0,0 @@
-/*
- * ispqueue.c
- *
- * TI OMAP3 ISP - Video buffers queue handling
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- * Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <asm/cacheflush.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/poll.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ispqueue.h"
-
-/* -----------------------------------------------------------------------------
- * Video buffers management
- */
-
-/*
- * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
- *
- * The typical operation required here is cache invalidation across
- * the (user space) buffer address range, and this _must_ be done
- * at QBUF stage (and *only* at QBUF).
- *
- * We try to use the optimal cache invalidation function:
- * - dmac_map_area:
- *    - used when the number of pages is _low_.
- *    - it becomes quite slow as the number of pages increases.
- *      - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
- *      - for a 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
- *
- * - flush_cache_all:
- *    - used when the number of pages is _high_.
- *    - the time taken is in the range of 500-900 us.
- *    - has a higher penalty, but the whole dcache + icache is invalidated.
- */
-/*
- * FIXME: dmac_inv_range crashes randomly on the user space buffer
- * address. Fall back to flush_cache_all for now.
- */
-#define ISP_CACHE_FLUSH_PAGES_MAX 0
-
-static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
-{
- if (buf->skip_cache)
- return;
-
- if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
- buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
- flush_cache_all();
- else {
- dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
- DMA_FROM_DEVICE);
- outer_inv_range(buf->vbuf.m.userptr,
- buf->vbuf.m.userptr + buf->vbuf.length);
- }
-}
-
-/*
- * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
- *
- * Lock the VMAs underlying the given buffer into memory. This avoids the
- * userspace buffer mapping from being swapped out, making VIPT cache handling
- * easier.
- *
- * Note that the pages will not be freed, as the buffers have been locked into
- * memory by a call to get_user_pages(), but the userspace mapping could
- * still disappear if the VMAs are not locked. This is caused by the memory
- * management code trying to be as lock-less as possible, which results in the
- * userspace mapping manager not finding out that the pages are locked under
- * some conditions.
- */
-static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
-{
- struct vm_area_struct *vma;
- unsigned long start;
- unsigned long end;
- int ret = 0;
-
- if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
- return 0;
-
- /* We can be called from workqueue context, if the current task dies, to
- * unlock the VMAs. In that case there's no current memory management
- * context so unlocking can't be performed, but the VMAs have been or
- * are getting destroyed anyway so it doesn't really matter.
- */
- if (!current || !current->mm)
- return lock ? -EINVAL : 0;
-
- start = buf->vbuf.m.userptr;
- end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
-
- down_write(&current->mm->mmap_sem);
- spin_lock(&current->mm->page_table_lock);
-
- do {
- vma = find_vma(current->mm, start);
- if (vma == NULL) {
- ret = -EFAULT;
- goto out;
- }
-
- if (lock)
- vma->vm_flags |= VM_LOCKED;
- else
- vma->vm_flags &= ~VM_LOCKED;
-
- start = vma->vm_end + 1;
- } while (vma->vm_end < end);
-
- if (lock)
- buf->vm_flags |= VM_LOCKED;
- else
- buf->vm_flags &= ~VM_LOCKED;
-
-out:
- spin_unlock(&current->mm->page_table_lock);
- up_write(&current->mm->mmap_sem);
- return ret;
-}
-
-/*
- * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
- *
- * Iterate over the vmalloc'ed area and create a scatter list entry for every
- * page.
- */
-static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
-{
- struct scatterlist *sglist;
- unsigned int npages;
- unsigned int i;
- void *addr;
-
- addr = buf->vaddr;
- npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
-
- sglist = vmalloc(npages * sizeof(*sglist));
- if (sglist == NULL)
- return -ENOMEM;
-
- sg_init_table(sglist, npages);
-
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(addr);
-
- if (page == NULL || PageHighMem(page)) {
- vfree(sglist);
- return -EINVAL;
- }
-
- sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
- }
-
- buf->sglen = npages;
- buf->sglist = sglist;
-
- return 0;
-}
-
-/*
- * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
- *
- * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
- */
-static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
-{
- struct scatterlist *sglist;
- unsigned int offset = buf->offset;
- unsigned int i;
-
- sglist = vmalloc(buf->npages * sizeof(*sglist));
- if (sglist == NULL)
- return -ENOMEM;
-
- sg_init_table(sglist, buf->npages);
-
- for (i = 0; i < buf->npages; ++i) {
- if (PageHighMem(buf->pages[i])) {
- vfree(sglist);
- return -EINVAL;
- }
-
- sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
- offset);
- offset = 0;
- }
-
- buf->sglen = buf->npages;
- buf->sglist = sglist;
-
- return 0;
-}
-
-/*
- * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
- *
- * Create a scatter list of physically contiguous pages starting at the buffer
- * memory physical address.
- */
-static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
-{
- struct scatterlist *sglist;
- unsigned int offset = buf->offset;
- unsigned long pfn = buf->paddr >> PAGE_SHIFT;
- unsigned int i;
-
- sglist = vmalloc(buf->npages * sizeof(*sglist));
- if (sglist == NULL)
- return -ENOMEM;
-
- sg_init_table(sglist, buf->npages);
-
- for (i = 0; i < buf->npages; ++i, ++pfn) {
- sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
- offset);
- /* PFNMAP buffers will not get DMA-mapped, set the DMA address
- * manually.
- */
- sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
- offset = 0;
- }
-
- buf->sglen = buf->npages;
- buf->sglist = sglist;
-
- return 0;
-}
-
-/*
- * isp_video_buffer_cleanup - Release pages for a userspace VMA.
- *
- * Release pages locked by a call to isp_video_buffer_prepare_user() and free
- * the pages table.
- */
-static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
-{
- enum dma_data_direction direction;
- unsigned int i;
-
- if (buf->queue->ops->buffer_cleanup)
- buf->queue->ops->buffer_cleanup(buf);
-
- if (!(buf->vm_flags & VM_PFNMAP)) {
- direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
- direction);
- }
-
- vfree(buf->sglist);
- buf->sglist = NULL;
- buf->sglen = 0;
-
- if (buf->pages != NULL) {
- isp_video_buffer_lock_vma(buf, 0);
-
- for (i = 0; i < buf->npages; ++i)
- page_cache_release(buf->pages[i]);
-
- vfree(buf->pages);
- buf->pages = NULL;
- }
-
- buf->npages = 0;
- buf->skip_cache = false;
-}
-
-/*
- * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
- *
- * This function creates a list of pages for a userspace VMA. The number of
- * pages is first computed based on the buffer size, and pages are then
- * retrieved by a call to get_user_pages.
- *
- * Pages are pinned to memory by get_user_pages, making them available for DMA
- * transfers. However, due to memory management optimizations, get_user_pages()
- * doesn't seem to guarantee that the pinned pages will not be written
- * to swap and removed from the userspace mapping(s). When this happens, a page
- * fault can be generated when accessing those unmapped pages.
- *
- * If the fault is triggered by a page table walk caused by VIPT cache
- * management operations, the page fault handler might oops if the MM semaphore
- * is held, as it can't handle kernel page faults in that case. To fix that, a
- * fixup entry needs to be added to the cache management code, or the userspace
- * VMA must be locked to avoid removing pages from the userspace mapping in the
- * first place.
- *
- * If the number of pages retrieved is smaller than the number required by the
- * buffer size, the function returns -EFAULT.
- */
-static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
-{
- unsigned long data;
- unsigned int first;
- unsigned int last;
- int ret;
-
- data = buf->vbuf.m.userptr;
- first = (data & PAGE_MASK) >> PAGE_SHIFT;
- last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
-
- buf->offset = data & ~PAGE_MASK;
- buf->npages = last - first + 1;
- buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
- if (buf->pages == NULL)
- return -ENOMEM;
-
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, data & PAGE_MASK,
- buf->npages,
- buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
- buf->pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret != buf->npages) {
- buf->npages = ret < 0 ? 0 : ret;
- isp_video_buffer_cleanup(buf);
- return -EFAULT;
- }
-
- ret = isp_video_buffer_lock_vma(buf, 1);
- if (ret < 0)
- isp_video_buffer_cleanup(buf);
-
- return ret;
-}
-
-/*
- * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
- *
- * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
- * memory and if they span a single VMA.
- *
- * Return 0 if the buffer is valid, or -EFAULT otherwise.
- */
-static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
-{
- struct vm_area_struct *vma;
- unsigned long prev_pfn;
- unsigned long this_pfn;
- unsigned long start;
- unsigned long end;
- dma_addr_t pa = 0;
- int ret = -EFAULT;
-
- start = buf->vbuf.m.userptr;
- end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
-
- buf->offset = start & ~PAGE_MASK;
- buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
- buf->pages = NULL;
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, start);
- if (vma == NULL || vma->vm_end < end)
- goto done;
-
- for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
- ret = follow_pfn(vma, start, &this_pfn);
- if (ret)
- goto done;
-
- if (prev_pfn == 0)
- pa = this_pfn << PAGE_SHIFT;
- else if (this_pfn != prev_pfn + 1) {
- ret = -EFAULT;
- goto done;
- }
-
- prev_pfn = this_pfn;
- }
-
- buf->paddr = pa + buf->offset;
- ret = 0;
-
-done:
- up_read(&current->mm->mmap_sem);
- return ret;
-}
-
-/*
- * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
- *
- * This function locates the VMAs for the buffer's userspace address and checks
- * that their flags match. The only flag that we need to care for at the moment
- * is VM_PFNMAP.
- *
- * The buffer vm_flags field is set to the first VMA flags.
- *
- * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
- * have incompatible flags.
- */
-static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
-{
- struct vm_area_struct *vma;
- pgprot_t uninitialized_var(vm_page_prot);
- unsigned long start;
- unsigned long end;
- int ret = -EFAULT;
-
- start = buf->vbuf.m.userptr;
- end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
-
- down_read(&current->mm->mmap_sem);
-
- do {
- vma = find_vma(current->mm, start);
- if (vma == NULL)
- goto done;
-
- if (start == buf->vbuf.m.userptr) {
- buf->vm_flags = vma->vm_flags;
- vm_page_prot = vma->vm_page_prot;
- }
-
- if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
- goto done;
-
- if (vm_page_prot != vma->vm_page_prot)
- goto done;
-
- start = vma->vm_end + 1;
- } while (vma->vm_end < end);
-
- /* Skip cache management to enhance performance for non-cached or
- * write-combining buffers.
- */
- if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
- vm_page_prot == pgprot_writecombine(vm_page_prot))
- buf->skip_cache = true;
-
- ret = 0;
-
-done:
- up_read(&current->mm->mmap_sem);
- return ret;
-}
-
-/*
- * isp_video_buffer_prepare - Make a buffer ready for operation
- *
- * Preparing a buffer involves:
- *
- * - validating VMAs (userspace buffers only)
- * - locking pages and VMAs into memory (userspace buffers only)
- * - building page and scatter-gather lists
- * - mapping buffers for DMA operation
- * - performing driver-specific preparation
- *
- * The function must be called in userspace context with a valid mm context
- * (this excludes cleanup paths such as sys_close when the userspace process
- * segfaults).
- */
-static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
-{
- enum dma_data_direction direction;
- int ret;
-
- switch (buf->vbuf.memory) {
- case V4L2_MEMORY_MMAP:
- ret = isp_video_buffer_sglist_kernel(buf);
- break;
-
- case V4L2_MEMORY_USERPTR:
- ret = isp_video_buffer_prepare_vm_flags(buf);
- if (ret < 0)
- return ret;
-
- if (buf->vm_flags & VM_PFNMAP) {
- ret = isp_video_buffer_prepare_pfnmap(buf);
- if (ret < 0)
- return ret;
-
- ret = isp_video_buffer_sglist_pfnmap(buf);
- } else {
- ret = isp_video_buffer_prepare_user(buf);
- if (ret < 0)
- return ret;
-
- ret = isp_video_buffer_sglist_user(buf);
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- if (ret < 0)
- goto done;
-
- if (!(buf->vm_flags & VM_PFNMAP)) {
- direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
- direction);
- if (ret != buf->sglen) {
- ret = -EFAULT;
- goto done;
- }
- }
-
- if (buf->queue->ops->buffer_prepare)
- ret = buf->queue->ops->buffer_prepare(buf);
-
-done:
- if (ret < 0) {
- isp_video_buffer_cleanup(buf);
- return ret;
- }
-
- return ret;
-}
-
-/*
- * isp_video_buffer_query - Query the status of a given buffer
- *
- * Locking: must be called with the queue lock held.
- */
-static void isp_video_buffer_query(struct isp_video_buffer *buf,
- struct v4l2_buffer *vbuf)
-{
- memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
-
- if (buf->vma_use_count)
- vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
-
- switch (buf->state) {
- case ISP_BUF_STATE_ERROR:
- vbuf->flags |= V4L2_BUF_FLAG_ERROR;
- /* Fallthrough */
- case ISP_BUF_STATE_DONE:
- vbuf->flags |= V4L2_BUF_FLAG_DONE;
- break;
- case ISP_BUF_STATE_QUEUED:
- case ISP_BUF_STATE_ACTIVE:
- vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
- break;
- case ISP_BUF_STATE_IDLE:
- default:
- break;
- }
-}
-
-/*
- * isp_video_buffer_wait - Wait for a buffer to be ready
- *
- * In non-blocking mode, return immediately with 0 if the buffer is ready or
- * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
- *
- * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
- * queue using the same condition.
- */
-static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
-{
- if (nonblocking) {
- return (buf->state != ISP_BUF_STATE_QUEUED &&
- buf->state != ISP_BUF_STATE_ACTIVE)
- ? 0 : -EAGAIN;
- }
-
- return wait_event_interruptible(buf->wait,
- buf->state != ISP_BUF_STATE_QUEUED &&
- buf->state != ISP_BUF_STATE_ACTIVE);
-}
-
-/* -----------------------------------------------------------------------------
- * Queue management
- */
-
-/*
- * isp_video_queue_free - Free video buffers memory
- *
- * Buffers can only be freed if the queue isn't streaming and if no buffer is
- * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
- *
- * This function must be called with the queue lock held.
- */
-static int isp_video_queue_free(struct isp_video_queue *queue)
-{
- unsigned int i;
-
- if (queue->streaming)
- return -EBUSY;
-
- for (i = 0; i < queue->count; ++i) {
- if (queue->buffers[i]->vma_use_count != 0)
- return -EBUSY;
- }
-
- for (i = 0; i < queue->count; ++i) {
- struct isp_video_buffer *buf = queue->buffers[i];
-
- isp_video_buffer_cleanup(buf);
-
- vfree(buf->vaddr);
- buf->vaddr = NULL;
-
- kfree(buf);
- queue->buffers[i] = NULL;
- }
-
- INIT_LIST_HEAD(&queue->queue);
- queue->count = 0;
- return 0;
-}
-
-/*
- * isp_video_queue_alloc - Allocate video buffers memory
- *
- * This function must be called with the queue lock held.
- */
-static int isp_video_queue_alloc(struct isp_video_queue *queue,
- unsigned int nbuffers,
- unsigned int size, enum v4l2_memory memory)
-{
- struct isp_video_buffer *buf;
- unsigned int i;
- void *mem;
- int ret;
-
- /* Start by freeing the buffers. */
- ret = isp_video_queue_free(queue);
- if (ret < 0)
- return ret;
-
- /* Bail out if no buffers should be allocated. */
- if (nbuffers == 0)
- return 0;
-
- /* Initialize the allocated buffers. */
- for (i = 0; i < nbuffers; ++i) {
- buf = kzalloc(queue->bufsize, GFP_KERNEL);
- if (buf == NULL)
- break;
-
- if (memory == V4L2_MEMORY_MMAP) {
- /* Allocate video buffers memory for mmap mode. Align
- * the size to the page size.
- */
- mem = vmalloc_32_user(PAGE_ALIGN(size));
- if (mem == NULL) {
- kfree(buf);
- break;
- }
-
- buf->vbuf.m.offset = i * PAGE_ALIGN(size);
- buf->vaddr = mem;
- }
-
- buf->vbuf.index = i;
- buf->vbuf.length = size;
- buf->vbuf.type = queue->type;
- buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- buf->vbuf.field = V4L2_FIELD_NONE;
- buf->vbuf.memory = memory;
-
- buf->queue = queue;
- init_waitqueue_head(&buf->wait);
-
- queue->buffers[i] = buf;
- }
-
- if (i == 0)
- return -ENOMEM;
-
- queue->count = i;
- return nbuffers;
-}
-
-/**
- * omap3isp_video_queue_cleanup - Clean up the video buffers queue
- * @queue: Video buffers queue
- *
- * Free all allocated resources and clean up the video buffers queue. The queue
- * must not be busy (no ongoing video stream) and buffers must have been
- * unmapped.
- *
- * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
- * unmapped.
- */
-int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
-{
- return isp_video_queue_free(queue);
-}
-
-/**
- * omap3isp_video_queue_init - Initialize the video buffers queue
- * @queue: Video buffers queue
- * @type: V4L2 buffer type (capture or output)
- * @ops: Driver-specific queue operations
- * @dev: Device used for DMA operations
- * @bufsize: Size of the driver-specific buffer structure
- *
- * Initialize the video buffers queue with the supplied parameters.
- *
- * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
- * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
- *
- * Buffer objects will be allocated using the given buffer size to allow room
- * for driver-specific fields. Driver-specific buffer structures must start
- * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
- * structure must pass the size of the isp_video_buffer structure in the bufsize
- * parameter.
- *
- * Return 0 on success.
- */
-int omap3isp_video_queue_init(struct isp_video_queue *queue,
- enum v4l2_buf_type type,
- const struct isp_video_queue_operations *ops,
- struct device *dev, unsigned int bufsize)
-{
- INIT_LIST_HEAD(&queue->queue);
- mutex_init(&queue->lock);
- spin_lock_init(&queue->irqlock);
-
- queue->type = type;
- queue->ops = ops;
- queue->dev = dev;
- queue->bufsize = bufsize;
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 operations
- */
-
-/**
- * omap3isp_video_queue_reqbufs - Allocate video buffers memory
- *
- * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
- * allocates video buffer objects and, for MMAP buffers, buffer memory.
- *
- * If the number of buffers is 0, all buffers are freed and the function returns
- * without performing any allocation.
- *
- * If the number of buffers is not 0, currently allocated buffers (if any) are
- * freed and the requested number of buffers are allocated. Depending on
- * driver-specific requirements and on memory availability, a number of buffers
- * smaller or bigger than requested can be allocated. This isn't considered
- * an error.
- *
- * Return 0 on success or one of the following error codes:
- *
- * -EINVAL if the buffer type or index are invalid
- * -EBUSY if the queue is busy (streaming or buffers mapped)
- * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
- */
-int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
- struct v4l2_requestbuffers *rb)
-{
- unsigned int nbuffers = rb->count;
- unsigned int size;
- int ret;
-
- if (rb->type != queue->type)
- return -EINVAL;
-
- queue->ops->queue_prepare(queue, &nbuffers, &size);
- if (size == 0)
- return -EINVAL;
-
- nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
-
- mutex_lock(&queue->lock);
-
- ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
- if (ret < 0)
- goto done;
-
- rb->count = ret;
- ret = 0;
-
-done:
- mutex_unlock(&queue->lock);
- return ret;
-}
-
-/**
- * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
- *
- * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
- * returns the status of a given video buffer.
- *
- * Return 0 on success or -EINVAL if the buffer type or index are invalid.
- */
-int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
- struct v4l2_buffer *vbuf)
-{
- struct isp_video_buffer *buf;
- int ret = 0;
-
- if (vbuf->type != queue->type)
- return -EINVAL;
-
- mutex_lock(&queue->lock);
-
- if (vbuf->index >= queue->count) {
- ret = -EINVAL;
- goto done;
- }
-
- buf = queue->buffers[vbuf->index];
- isp_video_buffer_query(buf, vbuf);
-
-done:
- mutex_unlock(&queue->lock);
- return ret;
-}
-
-/**
- * omap3isp_video_queue_qbuf - Queue a buffer
- *
- * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
- *
- * The v4l2_buffer structure passed from userspace is first sanity checked. If
- * sane, the buffer is then processed and added to the main queue and, if the
- * queue is streaming, to the IRQ queue.
- *
- * Before being enqueued, USERPTR buffers are checked for address changes. If
- * the buffer has a different userspace address, the old memory area is unlocked
- * and the new memory area is locked.
- */
-int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
- struct v4l2_buffer *vbuf)
-{
- struct isp_video_buffer *buf;
- unsigned long flags;
- int ret = -EINVAL;
-
- if (vbuf->type != queue->type)
- goto done;
-
- mutex_lock(&queue->lock);
-
- if (vbuf->index >= queue->count)
- goto done;
-
- buf = queue->buffers[vbuf->index];
-
- if (vbuf->memory != buf->vbuf.memory)
- goto done;
-
- if (buf->state != ISP_BUF_STATE_IDLE)
- goto done;
-
- if (vbuf->memory == V4L2_MEMORY_USERPTR &&
- vbuf->length < buf->vbuf.length)
- goto done;
-
- if (vbuf->memory == V4L2_MEMORY_USERPTR &&
- vbuf->m.userptr != buf->vbuf.m.userptr) {
- isp_video_buffer_cleanup(buf);
- buf->vbuf.m.userptr = vbuf->m.userptr;
- buf->prepared = 0;
- }
-
- if (!buf->prepared) {
- ret = isp_video_buffer_prepare(buf);
- if (ret < 0)
- goto done;
- buf->prepared = 1;
- }
-
- isp_video_buffer_cache_sync(buf);
-
- buf->state = ISP_BUF_STATE_QUEUED;
- list_add_tail(&buf->stream, &queue->queue);
-
- if (queue->streaming) {
- spin_lock_irqsave(&queue->irqlock, flags);
- queue->ops->buffer_queue(buf);
- spin_unlock_irqrestore(&queue->irqlock, flags);
- }
-
- ret = 0;
-
-done:
- mutex_unlock(&queue->lock);
- return ret;
-}
-
-/**
- * omap3isp_video_queue_dqbuf - Dequeue a buffer
- *
- * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
- *
- * Wait until a buffer is ready to be dequeued, remove it from the queue and
- * copy its information to the v4l2_buffer structure.
- *
- * If the nonblocking argument is not zero and no buffer is ready, return
- * -EAGAIN immediately instead of waiting.
- *
- * If no buffer has been enqueued, or if the requested buffer type doesn't match
- * the queue type, return -EINVAL.
- */
-int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
- struct v4l2_buffer *vbuf, int nonblocking)
-{
- struct isp_video_buffer *buf;
- int ret;
-
- if (vbuf->type != queue->type)
- return -EINVAL;
-
- mutex_lock(&queue->lock);
-
- if (list_empty(&queue->queue)) {
- ret = -EINVAL;
- goto done;
- }
-
- buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
- ret = isp_video_buffer_wait(buf, nonblocking);
- if (ret < 0)
- goto done;
-
- list_del(&buf->stream);
-
- isp_video_buffer_query(buf, vbuf);
- buf->state = ISP_BUF_STATE_IDLE;
- vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
-
-done:
- mutex_unlock(&queue->lock);
- return ret;
-}
-
-/**
- * omap3isp_video_queue_streamon - Start streaming
- *
- * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
- * starts streaming on the queue and calls the buffer_queue operation for all
- * queued buffers.
- *
- * Return 0 on success.
- */
-int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
-{
- struct isp_video_buffer *buf;
- unsigned long flags;
-
- mutex_lock(&queue->lock);
-
- if (queue->streaming)
- goto done;
-
- queue->streaming = 1;
-
- spin_lock_irqsave(&queue->irqlock, flags);
- list_for_each_entry(buf, &queue->queue, stream)
- queue->ops->buffer_queue(buf);
- spin_unlock_irqrestore(&queue->irqlock, flags);
-
-done:
- mutex_unlock(&queue->lock);
- return 0;
-}
-
-/**
- * omap3isp_video_queue_streamoff - Stop streaming
- *
- * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
- * stops streaming on the queue and wakes up all the buffers.
- *
- * Drivers must stop the hardware and synchronize with interrupt handlers and/or
- * delayed works before calling this function to make sure no buffer will be
- * touched by the driver and/or hardware.
- */
-void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
-{
- struct isp_video_buffer *buf;
- unsigned long flags;
- unsigned int i;
-
- mutex_lock(&queue->lock);
-
- if (!queue->streaming)
- goto done;
-
- queue->streaming = 0;
-
- spin_lock_irqsave(&queue->irqlock, flags);
- for (i = 0; i < queue->count; ++i) {
- buf = queue->buffers[i];
-
- if (buf->state == ISP_BUF_STATE_ACTIVE)
- wake_up(&buf->wait);
-
- buf->state = ISP_BUF_STATE_IDLE;
- }
- spin_unlock_irqrestore(&queue->irqlock, flags);
-
- INIT_LIST_HEAD(&queue->queue);
-
-done:
- mutex_unlock(&queue->lock);
-}
-
-/**
- * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
- *
- * This function is intended to be used with suspend/resume operations. It
- * discards all 'done' buffers as they would be too old to be requested after
- * resume.
- *
- * Drivers must stop the hardware and synchronize with interrupt handlers and/or
- * delayed works before calling this function to make sure no buffer will be
- * touched by the driver and/or hardware.
- */
-void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
-{
- struct isp_video_buffer *buf;
- unsigned int i;
-
- mutex_lock(&queue->lock);
-
- if (!queue->streaming)
- goto done;
-
- for (i = 0; i < queue->count; ++i) {
- buf = queue->buffers[i];
-
- if (buf->state == ISP_BUF_STATE_DONE)
- buf->state = ISP_BUF_STATE_ERROR;
- }
-
-done:
- mutex_unlock(&queue->lock);
-}
-
-static void isp_video_queue_vm_open(struct vm_area_struct *vma)
-{
- struct isp_video_buffer *buf = vma->vm_private_data;
-
- buf->vma_use_count++;
-}
-
-static void isp_video_queue_vm_close(struct vm_area_struct *vma)
-{
- struct isp_video_buffer *buf = vma->vm_private_data;
-
- buf->vma_use_count--;
-}
-
-static const struct vm_operations_struct isp_video_queue_vm_ops = {
- .open = isp_video_queue_vm_open,
- .close = isp_video_queue_vm_close,
-};
-
-/**
- * omap3isp_video_queue_mmap - Map buffers to userspace
- *
- * This function is intended to be used as an mmap() file operation handler. It
- * maps a buffer to userspace based on the VMA offset.
- *
- * Only buffers of memory type MMAP are supported.
- */
-int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
- struct vm_area_struct *vma)
-{
- struct isp_video_buffer *uninitialized_var(buf);
- unsigned long size;
- unsigned int i;
- int ret = 0;
-
- mutex_lock(&queue->lock);
-
- for (i = 0; i < queue->count; ++i) {
- buf = queue->buffers[i];
- if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
- break;
- }
-
- if (i == queue->count) {
- ret = -EINVAL;
- goto done;
- }
-
- size = vma->vm_end - vma->vm_start;
-
- if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
- size != PAGE_ALIGN(buf->vbuf.length)) {
- ret = -EINVAL;
- goto done;
- }
-
- ret = remap_vmalloc_range(vma, buf->vaddr, 0);
- if (ret < 0)
- goto done;
-
- vma->vm_ops = &isp_video_queue_vm_ops;
- vma->vm_private_data = buf;
- isp_video_queue_vm_open(vma);
-
-done:
- mutex_unlock(&queue->lock);
- return ret;
-}
-
-/**
- * omap3isp_video_queue_poll - Poll video queue state
- *
- * This function is intended to be used as a poll() file operation handler. It
- * polls the state of the video buffer at the front of the queue and returns an
- * events mask.
- *
- * If no buffer is present at the front of the queue, POLLERR is returned.
- */
-unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
- struct file *file, poll_table *wait)
-{
- struct isp_video_buffer *buf;
- unsigned int mask = 0;
-
- mutex_lock(&queue->lock);
- if (list_empty(&queue->queue)) {
- mask |= POLLERR;
- goto done;
- }
- buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
-
- poll_wait(file, &buf->wait, wait);
- if (buf->state == ISP_BUF_STATE_DONE ||
- buf->state == ISP_BUF_STATE_ERROR) {
- if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- mask |= POLLIN | POLLRDNORM;
- else
- mask |= POLLOUT | POLLWRNORM;
- }
-
-done:
- mutex_unlock(&queue->lock);
- return mask;
-}
diff --git a/drivers/media/platform/omap3isp/ispqueue.h b/drivers/media/platform/omap3isp/ispqueue.h
deleted file mode 100644
index 3e048ad65647..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * ispqueue.h
- *
- * TI OMAP3 ISP - Video buffers queue handling
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- * Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef OMAP3_ISP_QUEUE_H
-#define OMAP3_ISP_QUEUE_H
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/videodev2.h>
-#include <linux/wait.h>
-
-struct isp_video_queue;
-struct page;
-struct scatterlist;
-
-#define ISP_VIDEO_MAX_BUFFERS 16
-
-/**
- * enum isp_video_buffer_state - ISP video buffer state
- * @ISP_BUF_STATE_IDLE: The buffer is under userspace control (dequeued
- * or not queued yet).
- * @ISP_BUF_STATE_QUEUED: The buffer has been queued but isn't used by the
- * device yet.
- * @ISP_BUF_STATE_ACTIVE: The buffer is in use for an active video transfer.
- * @ISP_BUF_STATE_ERROR: The device is done with the buffer and an error
- * occurred. For capture devices the buffer likely contains corrupted data or
- * no data at all.
- * @ISP_BUF_STATE_DONE: The device is done with the buffer and no error occurred.
- * For capture devices the buffer contains valid data.
- */
-enum isp_video_buffer_state {
- ISP_BUF_STATE_IDLE,
- ISP_BUF_STATE_QUEUED,
- ISP_BUF_STATE_ACTIVE,
- ISP_BUF_STATE_ERROR,
- ISP_BUF_STATE_DONE,
-};
-
-/**
- * struct isp_video_buffer - ISP video buffer
- * @vma_use_count: Number of times the buffer is mmap'ed to userspace
- * @stream: List head for insertion into main queue
- * @queue: ISP buffers queue this buffer belongs to
- * @prepared: Whether the buffer has been prepared
- * @skip_cache: Whether to skip cache management operations for this buffer
- * @vaddr: Memory virtual address (for kernel buffers)
- * @vm_flags: Buffer VMA flags (for userspace buffers)
- * @offset: Offset inside the first page (for userspace buffers)
- * @npages: Number of pages (for userspace buffers)
- * @pages: Pages table (for userspace non-VM_PFNMAP buffers)
- * @paddr: Memory physical address (for userspace VM_PFNMAP buffers)
- * @sglen: Number of elements in the scatter list (for non-VM_PFNMAP buffers)
- * @sglist: Scatter list (for non-VM_PFNMAP buffers)
- * @vbuf: V4L2 buffer
- * @irqlist: List head for insertion into IRQ queue
- * @state: Current buffer state
- * @wait: Wait queue to signal buffer completion
- */
-struct isp_video_buffer {
- unsigned long vma_use_count;
- struct list_head stream;
- struct isp_video_queue *queue;
- unsigned int prepared:1;
- bool skip_cache;
-
- /* For kernel buffers. */
- void *vaddr;
-
- /* For userspace buffers. */
- vm_flags_t vm_flags;
- unsigned long offset;
- unsigned int npages;
- struct page **pages;
- dma_addr_t paddr;
-
- /* For all buffers except VM_PFNMAP. */
- unsigned int sglen;
- struct scatterlist *sglist;
-
- /* Touched by the interrupt handler. */
- struct v4l2_buffer vbuf;
- struct list_head irqlist;
- enum isp_video_buffer_state state;
- wait_queue_head_t wait;
-};
-
-#define to_isp_video_buffer(vb) container_of(vb, struct isp_video_buffer, vb)
-
-/**
- * struct isp_video_queue_operations - Driver-specific operations
- * @queue_prepare: Called before allocating buffers. Drivers should clamp the
- * number of buffers according to their requirements, and must return the
- * buffer size in bytes.
- * @buffer_prepare: Called the first time a buffer is queued, or after changing
- * the userspace memory address for a USERPTR buffer, with the queue lock
- * held. Drivers should perform device-specific buffer preparation (such as
- * mapping the buffer memory in an IOMMU). This operation is optional.
- * @buffer_queue: Called when a buffer is being added to the queue with the
- * queue irqlock spinlock held.
- * @buffer_cleanup: Called before freeing buffers, or before changing the
- * userspace memory address for a USERPTR buffer, with the queue lock held.
- * Drivers must perform cleanup operations required to undo the
- * buffer_prepare call. This operation is optional.
- */
-struct isp_video_queue_operations {
- void (*queue_prepare)(struct isp_video_queue *queue,
- unsigned int *nbuffers, unsigned int *size);
- int (*buffer_prepare)(struct isp_video_buffer *buf);
- void (*buffer_queue)(struct isp_video_buffer *buf);
- void (*buffer_cleanup)(struct isp_video_buffer *buf);
-};
-
-/**
- * struct isp_video_queue - ISP video buffers queue
- * @type: Type of video buffers handled by this queue
- * @ops: Queue operations
- * @dev: Device used for DMA operations
- * @bufsize: Size of a driver-specific buffer object
- * @count: Number of currently allocated buffers
- * @buffers: ISP video buffers
- * @lock: Mutex to protect access to the buffers, main queue and state
- * @irqlock: Spinlock to protect access to the IRQ queue
- * @streaming: Queue state, indicates whether the queue is streaming
- * @queue: List of all queued buffers
- */
-struct isp_video_queue {
- enum v4l2_buf_type type;
- const struct isp_video_queue_operations *ops;
- struct device *dev;
- unsigned int bufsize;
-
- unsigned int count;
- struct isp_video_buffer *buffers[ISP_VIDEO_MAX_BUFFERS];
- struct mutex lock;
- spinlock_t irqlock;
-
- unsigned int streaming:1;
-
- struct list_head queue;
-};
-
-int omap3isp_video_queue_cleanup(struct isp_video_queue *queue);
-int omap3isp_video_queue_init(struct isp_video_queue *queue,
- enum v4l2_buf_type type,
- const struct isp_video_queue_operations *ops,
- struct device *dev, unsigned int bufsize);
-
-int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
- struct v4l2_requestbuffers *rb);
-int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
- struct v4l2_buffer *vbuf);
-int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
- struct v4l2_buffer *vbuf);
-int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
- struct v4l2_buffer *vbuf, int nonblocking);
-int omap3isp_video_queue_streamon(struct isp_video_queue *queue);
-void omap3isp_video_queue_streamoff(struct isp_video_queue *queue);
-void omap3isp_video_queue_discard_done(struct isp_video_queue *queue);
-int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
- struct vm_area_struct *vma);
-unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
- struct file *file, poll_table *wait);
-
-#endif /* OMAP3_ISP_QUEUE_H */
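
The custom queue API removed above is replaced by the videobuf2 framework in the rest of this series. As a rough orientation, a sketch under that assumption (my_* names are hypothetical, not part of the patch) of how the old entry points map onto vb2:

/* Sketch: minimal vb2 replacement for the removed isp_video_queue API. */
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct my_buffer {				/* hypothetical driver buffer */
	struct vb2_buffer vb;
};

extern const struct vb2_ops my_vb2_ops;		/* hypothetical, defined elsewhere */

static int my_queue_init(struct vb2_queue *q, void *drv_priv)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = drv_priv;
	q->ops = &my_vb2_ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct my_buffer);

	return vb2_queue_init(q);
}

/*
 * omap3isp_video_queue_reqbufs/querybuf/qbuf/dqbuf/streamon/streamoff/
 * mmap/poll then become vb2_reqbufs/vb2_querybuf/vb2_qbuf/vb2_dqbuf/
 * vb2_streamon/vb2_streamoff/vb2_mmap/vb2_poll.
 */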
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 86369df81d74..6f077c2377db 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1040,7 +1040,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
*/
buffer = omap3isp_video_buffer_next(&res->video_out);
if (buffer != NULL) {
- resizer_set_outaddr(res, buffer->isp_addr);
+ resizer_set_outaddr(res, buffer->dma);
restart = 1;
}
@@ -1049,7 +1049,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
if (res->input == RESIZER_INPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&res->video_in);
if (buffer != NULL)
- resizer_set_inaddr(res, buffer->isp_addr);
+ resizer_set_inaddr(res, buffer->dma);
pipe->state |= ISP_PIPELINE_IDLE_INPUT;
}
@@ -1101,7 +1101,7 @@ static int resizer_video_queue(struct isp_video *video,
struct isp_res_device *res = &video->isp->isp_res;
if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- resizer_set_inaddr(res, buffer->isp_addr);
+ resizer_set_inaddr(res, buffer->dma);
/*
* We now have a buffer queued on the output. Despite what the
@@ -1116,7 +1116,7 @@ static int resizer_video_queue(struct isp_video *video,
* continuous mode or when starting the stream.
*/
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- resizer_set_outaddr(res, buffer->isp_addr);
+ resizer_set_outaddr(res, buffer->dma);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 5707f85c4cc4..e6cbc1eaf4ca 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -26,13 +26,12 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/omap-iommu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "isp.h"
-#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0)
+#define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch >= 0)
/*
* MAGIC_SIZE must always be the greatest common divisor of
@@ -77,21 +76,10 @@ static void __isp_stat_buf_sync_magic(struct ispstat *stat,
dma_addr_t, unsigned long, size_t,
enum dma_data_direction))
{
- struct device *dev = stat->isp->dev;
- struct page *pg;
- dma_addr_t dma_addr;
- u32 offset;
-
- /* Initial magic words */
- pg = vmalloc_to_page(buf->virt_addr);
- dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
- dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
-
- /* Final magic words */
- pg = vmalloc_to_page(buf->virt_addr + buf_size);
- dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
- offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
- dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
+ /* Sync the initial and final magic words. */
+ dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
+ dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
+ buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
}
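
For clarity, the page arithmetic in the new sync helper worked through on an assumed example (4 KiB pages, so PAGE_MASK == ~0xfffUL):

/*
 * Example: buf_size = 0x2100, PAGE_SIZE = 4096.
 *
 *	buf_size & PAGE_MASK  == 0x2000   DMA address offset of the page
 *	                                  holding the end of the buffer
 *	buf_size & ~PAGE_MASK == 0x0100   offset of the final magic words
 *	                                  inside that page
 *
 * The second dma_sync() call therefore touches only MAGIC_SIZE bytes at
 * buf->dma_addr + 0x2100, matching the dma_sync callback's
 * (dev, addr, offset, size, dir) signature shown above.
 */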
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
@@ -99,7 +87,7 @@ static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
u32 buf_size,
enum dma_data_direction dir)
{
- if (IS_COHERENT_BUF(stat))
+ if (ISP_STAT_USES_DMAENGINE(stat))
return;
__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
@@ -111,7 +99,7 @@ static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
u32 buf_size,
enum dma_data_direction dir)
{
- if (IS_COHERENT_BUF(stat))
+ if (ISP_STAT_USES_DMAENGINE(stat))
return;
__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
@@ -180,21 +168,21 @@ static void isp_stat_buf_insert_magic(struct ispstat *stat,
static void isp_stat_buf_sync_for_device(struct ispstat *stat,
struct ispstat_buffer *buf)
{
- if (IS_COHERENT_BUF(stat))
+ if (ISP_STAT_USES_DMAENGINE(stat))
return;
- dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
- buf->iovm->sgt->nents, DMA_FROM_DEVICE);
+ dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
+ buf->sgt.nents, DMA_FROM_DEVICE);
}
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
struct ispstat_buffer *buf)
{
- if (IS_COHERENT_BUF(stat))
+ if (ISP_STAT_USES_DMAENGINE(stat))
return;
- dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
- buf->iovm->sgt->nents, DMA_FROM_DEVICE);
+ dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
+ buf->sgt.nents, DMA_FROM_DEVICE);
}
static void isp_stat_buf_clear(struct ispstat *stat)
@@ -354,29 +342,21 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
static void isp_stat_bufs_free(struct ispstat *stat)
{
- struct isp_device *isp = stat->isp;
- int i;
+ struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
+ ? NULL : stat->isp->dev;
+ unsigned int i;
for (i = 0; i < STAT_MAX_BUFS; i++) {
struct ispstat_buffer *buf = &stat->buf[i];
- if (!IS_COHERENT_BUF(stat)) {
- if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
- continue;
- if (buf->iovm)
- dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
- buf->iovm->sgt->nents,
- DMA_FROM_DEVICE);
- omap_iommu_vfree(isp->domain, isp->dev,
- buf->iommu_addr);
- } else {
- if (!buf->virt_addr)
- continue;
- dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
- buf->virt_addr, buf->dma_addr);
- }
- buf->iommu_addr = 0;
- buf->iovm = NULL;
+ if (!buf->virt_addr)
+ continue;
+
+ sg_free_table(&buf->sgt);
+
+ dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
+ buf->dma_addr);
+
buf->dma_addr = 0;
buf->virt_addr = NULL;
buf->empty = 1;
@@ -389,83 +369,51 @@ static void isp_stat_bufs_free(struct ispstat *stat)
stat->active_buf = NULL;
}
-static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
-{
- struct isp_device *isp = stat->isp;
- int i;
-
- stat->buf_alloc_size = size;
-
- for (i = 0; i < STAT_MAX_BUFS; i++) {
- struct ispstat_buffer *buf = &stat->buf[i];
- struct iovm_struct *iovm;
-
- WARN_ON(buf->dma_addr);
- buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
- size, IOMMU_FLAG);
- if (IS_ERR((void *)buf->iommu_addr)) {
- dev_err(stat->isp->dev,
- "%s: Can't acquire memory for "
- "buffer %d\n", stat->subdev.name, i);
- isp_stat_bufs_free(stat);
- return -ENOMEM;
- }
-
- iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
- if (!iovm ||
- !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
- DMA_FROM_DEVICE)) {
- isp_stat_bufs_free(stat);
- return -ENOMEM;
- }
- buf->iovm = iovm;
-
- buf->virt_addr = omap_da_to_va(stat->isp->dev,
- (u32)buf->iommu_addr);
- buf->empty = 1;
- dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
- "iommu_addr=0x%08lx virt_addr=0x%08lx",
- stat->subdev.name, i, buf->iommu_addr,
- (unsigned long)buf->virt_addr);
- }
-
- return 0;
-}
-
-static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
+static int isp_stat_bufs_alloc_one(struct device *dev,
+ struct ispstat_buffer *buf,
+ unsigned int size)
{
- int i;
-
- stat->buf_alloc_size = size;
-
- for (i = 0; i < STAT_MAX_BUFS; i++) {
- struct ispstat_buffer *buf = &stat->buf[i];
-
- WARN_ON(buf->iommu_addr);
- buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
- &buf->dma_addr, GFP_KERNEL | GFP_DMA);
+ int ret;
- if (!buf->virt_addr || !buf->dma_addr) {
- dev_info(stat->isp->dev,
- "%s: Can't acquire memory for "
- "DMA buffer %d\n", stat->subdev.name, i);
- isp_stat_bufs_free(stat);
- return -ENOMEM;
- }
- buf->empty = 1;
+ buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf->virt_addr)
+ return -ENOMEM;
- dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
- "dma_addr=0x%08lx virt_addr=0x%08lx\n",
- stat->subdev.name, i, (unsigned long)buf->dma_addr,
- (unsigned long)buf->virt_addr);
+ ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
+ size);
+ if (ret < 0) {
+ dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
+ buf->virt_addr = NULL;
+ buf->dma_addr = 0;
+ return ret;
}
return 0;
}
+/*
+ * The device passed to the DMA API depends on whether the statistics block uses
+ * ISP DMA, external DMA or PIO to transfer data.
+ *
+ * The first case (for the AEWB and AF engines) passes the ISP device, resulting
+ * in the DMA buffers being mapped through the ISP IOMMU.
+ *
+ * The second case (for the histogram engine when it uses external DMA) should
+ * pass the DMA engine device. As that device isn't accessible through the OMAP
+ * DMA engine API the driver passes NULL instead, resulting in the buffers
+ * being mapped directly as physical pages.
+ *
+ * The third case (for the histogram engine when it uses PIO) doesn't require
+ * any mapping. The buffers could be allocated with kmalloc/vmalloc, but we
+ * still use dma_alloc_coherent() for consistency purposes.
+ */
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
+ struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
+ ? NULL : stat->isp->dev;
unsigned long flags;
+ unsigned int i;
spin_lock_irqsave(&stat->isp->stat_lock, flags);
@@ -489,10 +437,31 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
isp_stat_bufs_free(stat);
- if (IS_COHERENT_BUF(stat))
- return isp_stat_bufs_alloc_dma(stat, size);
- else
- return isp_stat_bufs_alloc_iommu(stat, size);
+ stat->buf_alloc_size = size;
+
+ for (i = 0; i < STAT_MAX_BUFS; i++) {
+ struct ispstat_buffer *buf = &stat->buf[i];
+ int ret;
+
+ ret = isp_stat_bufs_alloc_one(dev, buf, size);
+ if (ret < 0) {
+ dev_err(stat->isp->dev,
+ "%s: Failed to allocate DMA buffer %u\n",
+ stat->subdev.name, i);
+ isp_stat_bufs_free(stat);
+ return ret;
+ }
+
+ buf->empty = 1;
+
+ dev_dbg(stat->isp->dev,
+ "%s: buffer[%u] allocated. dma=0x%08lx virt=0x%08lx",
+ stat->subdev.name, i,
+ (unsigned long)buf->dma_addr,
+ (unsigned long)buf->virt_addr);
+ }
+
+ return 0;
}
static void isp_stat_queue_event(struct ispstat *stat, int err)
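
In isolation, the allocation pattern the driver switches to above looks roughly like this (a sketch, not part of the patch; stat_buf_alloc and struct stat_buf are illustrative names):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct stat_buf {			/* hypothetical container */
	struct sg_table sgt;
	void *virt;
	dma_addr_t dma;
};

static int stat_buf_alloc(struct device *dev, struct stat_buf *buf,
			  size_t size)
{
	int ret;

	/* One coherent buffer per statistics slot... */
	buf->virt = dma_alloc_coherent(dev, size, &buf->dma, GFP_KERNEL);
	if (!buf->virt)
		return -ENOMEM;

	/* ...plus an sg_table view of it, so the magic words can later be
	 * synced with dma_sync_sg_for_{cpu,device}().
	 */
	ret = dma_get_sgtable(dev, &buf->sgt, buf->virt, buf->dma, size);
	if (ret < 0) {
		dma_free_coherent(dev, size, buf->virt, buf->dma);
		return ret;
	}

	/* The caller frees with sg_free_table() + dma_free_coherent(). */
	return 0;
}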
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 9a047c929b9f..58d6ac7cb664 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -46,8 +46,7 @@
struct ispstat;
struct ispstat_buffer {
- unsigned long iommu_addr;
- struct iovm_struct *iovm;
+ struct sg_table sgt;
void *virt_addr;
dma_addr_t dma_addr;
struct timespec ts;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 85b4036ba5e4..e36bac26476c 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -27,7 +27,6 @@
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/omap-iommu.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
@@ -35,6 +34,7 @@
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
#include "ispvideo.h"
#include "isp.h"
@@ -326,90 +326,36 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
}
/* -----------------------------------------------------------------------------
- * IOMMU management
- */
-
-#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
-
-/*
- * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
- * @isp: Device pointer specific to the OMAP3 ISP.
- * @sglist: Pointer to source Scatter gather list to allocate.
- * @sglen: Number of elements of the scatter-gatter list.
- *
- * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if
- * we ran out of memory.
- */
-static dma_addr_t
-ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
-{
- struct sg_table *sgt;
- u32 da;
-
- sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
- if (sgt == NULL)
- return -ENOMEM;
-
- sgt->sgl = (struct scatterlist *)sglist;
- sgt->nents = sglen;
- sgt->orig_nents = sglen;
-
- da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
- if (IS_ERR_VALUE(da))
- kfree(sgt);
-
- return da;
-}
-
-/*
- * ispmmu_vunmap - Unmap a device address from the ISP MMU
- * @isp: Device pointer specific to the OMAP3 ISP.
- * @da: Device address generated from a ispmmu_vmap call.
- */
-static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
-{
- struct sg_table *sgt;
-
- sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
- kfree(sgt);
-}
-
-/* -----------------------------------------------------------------------------
* Video queue operations
*/
-static void isp_video_queue_prepare(struct isp_video_queue *queue,
- unsigned int *nbuffers, unsigned int *size)
+static int isp_video_queue_setup(struct vb2_queue *queue,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct isp_video_fh *vfh =
- container_of(queue, struct isp_video_fh, queue);
+ struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
struct isp_video *video = vfh->video;
- *size = vfh->format.fmt.pix.sizeimage;
- if (*size == 0)
- return;
+ *num_planes = 1;
- *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
-}
+ sizes[0] = vfh->format.fmt.pix.sizeimage;
+ if (sizes[0] == 0)
+ return -EINVAL;
-static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
-{
- struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
- struct isp_buffer *buffer = to_isp_buffer(buf);
- struct isp_video *video = vfh->video;
+ alloc_ctxs[0] = video->alloc_ctx;
- if (buffer->isp_addr) {
- ispmmu_vunmap(video->isp, buffer->isp_addr);
- buffer->isp_addr = 0;
- }
+ *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
+
+ return 0;
}
-static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
+static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
- struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
+ struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
struct isp_buffer *buffer = to_isp_buffer(buf);
struct isp_video *video = vfh->video;
- unsigned long addr;
+ dma_addr_t addr;
/* Refuse to prepare the buffer if the video node has registered an
* error. We don't need to take any lock here as the operation is
@@ -420,19 +366,16 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
if (unlikely(video->error))
return -EIO;
- addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
- if (IS_ERR_VALUE(addr))
- return -EIO;
-
+ addr = vb2_dma_contig_plane_dma_addr(buf, 0);
if (!IS_ALIGNED(addr, 32)) {
- dev_dbg(video->isp->dev, "Buffer address must be "
- "aligned to 32 bytes boundary.\n");
- ispmmu_vunmap(video->isp, buffer->isp_addr);
+ dev_dbg(video->isp->dev,
+ "Buffer address must be aligned to 32 bytes boundary.\n");
return -EINVAL;
}
- buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
- buffer->isp_addr = addr;
+ vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage);
+ buffer->dma = addr;
+
return 0;
}
@@ -445,9 +388,9 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
* If the pipeline is busy, it will be restarted in the output module interrupt
* handler.
*/
-static void isp_video_buffer_queue(struct isp_video_buffer *buf)
+static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
- struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
+ struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
struct isp_buffer *buffer = to_isp_buffer(buf);
struct isp_video *video = vfh->video;
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
@@ -456,14 +399,18 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
unsigned int empty;
unsigned int start;
+ spin_lock_irqsave(&video->irqlock, flags);
+
if (unlikely(video->error)) {
- buf->state = ISP_BUF_STATE_ERROR;
- wake_up(&buf->wait);
+ vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&video->irqlock, flags);
return;
}
empty = list_empty(&video->dmaqueue);
- list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
+ list_add_tail(&buffer->irqlist, &video->dmaqueue);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
if (empty) {
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -487,23 +434,22 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
}
}
-static const struct isp_video_queue_operations isp_video_queue_ops = {
- .queue_prepare = &isp_video_queue_prepare,
- .buffer_prepare = &isp_video_buffer_prepare,
- .buffer_queue = &isp_video_buffer_queue,
- .buffer_cleanup = &isp_video_buffer_cleanup,
+static const struct vb2_ops isp_video_queue_ops = {
+ .queue_setup = isp_video_queue_setup,
+ .buf_prepare = isp_video_buffer_prepare,
+ .buf_queue = isp_video_buffer_queue,
};
/*
* omap3isp_video_buffer_next - Complete the current buffer and return the next
* @video: ISP video object
*
- * Remove the current video buffer from the DMA queue and fill its timestamp,
- * field count and state fields before waking up its completion handler.
+ * Remove the current video buffer from the DMA queue and fill its timestamp and
+ * field count before handing it back to videobuf2.
*
- * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
- * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
- * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
+ * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
+ * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
*
* The DMA queue is expected to contain at least one buffer.
*
@@ -513,26 +459,25 @@ static const struct isp_video_queue_operations isp_video_queue_ops = {
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
- struct isp_video_queue *queue = video->queue;
enum isp_pipeline_state state;
- struct isp_video_buffer *buf;
+ struct isp_buffer *buf;
unsigned long flags;
struct timespec ts;
- spin_lock_irqsave(&queue->irqlock, flags);
+ spin_lock_irqsave(&video->irqlock, flags);
if (WARN_ON(list_empty(&video->dmaqueue))) {
- spin_unlock_irqrestore(&queue->irqlock, flags);
+ spin_unlock_irqrestore(&video->irqlock, flags);
return NULL;
}
- buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
+ buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
irqlist);
list_del(&buf->irqlist);
- spin_unlock_irqrestore(&queue->irqlock, flags);
+ spin_unlock_irqrestore(&video->irqlock, flags);
ktime_get_ts(&ts);
- buf->vbuf.timestamp.tv_sec = ts.tv_sec;
- buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
+ buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
/* Do frame number propagation only if this is the output video node.
* Frame number either comes from the CSI receivers or it gets
@@ -541,22 +486,27 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
* first, so the input number might lag behind by 1 in some cases.
*/
if (video == pipe->output && !pipe->do_propagation)
- buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
+ buf->vb.v4l2_buf.sequence =
+ atomic_inc_return(&pipe->frame_number);
else
- buf->vbuf.sequence = atomic_read(&pipe->frame_number);
+ buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
/* Report pipeline errors to userspace on the capture device side. */
- if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
- buf->state = ISP_BUF_STATE_ERROR;
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
+ state = VB2_BUF_STATE_ERROR;
pipe->error = false;
} else {
- buf->state = ISP_BUF_STATE_DONE;
+ state = VB2_BUF_STATE_DONE;
}
- wake_up(&buf->wait);
+ vb2_buffer_done(&buf->vb, state);
+
+ spin_lock_irqsave(&video->irqlock, flags);
if (list_empty(&video->dmaqueue)) {
- if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
state = ISP_PIPELINE_QUEUE_OUTPUT
| ISP_PIPELINE_STREAM;
else
@@ -571,16 +521,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
return NULL;
}
- if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
- spin_lock_irqsave(&pipe->lock, flags);
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
+ spin_lock(&pipe->lock);
pipe->state &= ~ISP_PIPELINE_STREAM;
- spin_unlock_irqrestore(&pipe->lock, flags);
+ spin_unlock(&pipe->lock);
}
- buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
+ buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
irqlist);
- buf->state = ISP_BUF_STATE_ACTIVE;
- return to_isp_buffer(buf);
+ buf->vb.state = VB2_BUF_STATE_ACTIVE;
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ return buf;
}
/*
@@ -592,25 +545,22 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
*/
void omap3isp_video_cancel_stream(struct isp_video *video)
{
- struct isp_video_queue *queue = video->queue;
unsigned long flags;
- spin_lock_irqsave(&queue->irqlock, flags);
+ spin_lock_irqsave(&video->irqlock, flags);
while (!list_empty(&video->dmaqueue)) {
- struct isp_video_buffer *buf;
+ struct isp_buffer *buf;
buf = list_first_entry(&video->dmaqueue,
- struct isp_video_buffer, irqlist);
+ struct isp_buffer, irqlist);
list_del(&buf->irqlist);
-
- buf->state = ISP_BUF_STATE_ERROR;
- wake_up(&buf->wait);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
video->error = true;
- spin_unlock_irqrestore(&queue->irqlock, flags);
+ spin_unlock_irqrestore(&video->irqlock, flags);
}
/*
@@ -627,12 +577,15 @@ void omap3isp_video_resume(struct isp_video *video, int continuous)
{
struct isp_buffer *buf = NULL;
- if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- omap3isp_video_queue_discard_done(video->queue);
+ if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_lock(&video->queue_lock);
+ vb2_discard_done(video->queue);
+ mutex_unlock(&video->queue_lock);
+ }
if (!list_empty(&video->dmaqueue)) {
buf = list_first_entry(&video->dmaqueue,
- struct isp_buffer, buffer.irqlist);
+ struct isp_buffer, irqlist);
video->ops->queue(video, buf);
video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
} else {
@@ -840,33 +793,56 @@ static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
- return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
+ mutex_lock(&video->queue_lock);
+ ret = vb2_reqbufs(&vfh->queue, rb);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
}
static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_querybuf(&vfh->queue, b);
+ mutex_unlock(&video->queue_lock);
- return omap3isp_video_queue_querybuf(&vfh->queue, b);
+ return ret;
}
static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
- return omap3isp_video_queue_qbuf(&vfh->queue, b);
+ mutex_lock(&video->queue_lock);
+ ret = vb2_qbuf(&vfh->queue, b);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
}
static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&video->queue_lock);
- return omap3isp_video_queue_dqbuf(&vfh->queue, b,
- file->f_flags & O_NONBLOCK);
+ return ret;
}
static int isp_video_check_external_subdevs(struct isp_video *video,
@@ -1006,11 +982,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
mutex_lock(&video->stream_lock);
- if (video->streaming) {
- mutex_unlock(&video->stream_lock);
- return -EBUSY;
- }
-
/* Start streaming on the pipeline. No link touching an entity in the
* pipeline can be activated or deactivated once streaming is started.
*/
@@ -1069,7 +1040,9 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
INIT_LIST_HEAD(&video->dmaqueue);
atomic_set(&pipe->frame_number, -1);
- ret = omap3isp_video_queue_streamon(&vfh->queue);
+ mutex_lock(&video->queue_lock);
+ ret = vb2_streamon(&vfh->queue, type);
+ mutex_unlock(&video->queue_lock);
if (ret < 0)
goto err_check_format;
@@ -1082,19 +1055,19 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
ISP_PIPELINE_STREAM_CONTINUOUS);
if (ret < 0)
goto err_set_stream;
- spin_lock_irqsave(&video->queue->irqlock, flags);
+ spin_lock_irqsave(&video->irqlock, flags);
if (list_empty(&video->dmaqueue))
video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
- spin_unlock_irqrestore(&video->queue->irqlock, flags);
+ spin_unlock_irqrestore(&video->irqlock, flags);
}
- video->streaming = 1;
-
mutex_unlock(&video->stream_lock);
return 0;
err_set_stream:
- omap3isp_video_queue_streamoff(&vfh->queue);
+ mutex_lock(&video->queue_lock);
+ vb2_streamoff(&vfh->queue, type);
+ mutex_unlock(&video->queue_lock);
err_check_format:
media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
@@ -1130,9 +1103,9 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
mutex_lock(&video->stream_lock);
/* Make sure we're not streaming yet. */
- mutex_lock(&vfh->queue.lock);
- streaming = vfh->queue.streaming;
- mutex_unlock(&vfh->queue.lock);
+ mutex_lock(&video->queue_lock);
+ streaming = vb2_is_streaming(&vfh->queue);
+ mutex_unlock(&video->queue_lock);
if (!streaming)
goto done;
@@ -1151,9 +1124,12 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
/* Stop the stream. */
omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
- omap3isp_video_queue_streamoff(&vfh->queue);
+ omap3isp_video_cancel_stream(video);
+
+ mutex_lock(&video->queue_lock);
+ vb2_streamoff(&vfh->queue, type);
+ mutex_unlock(&video->queue_lock);
video->queue = NULL;
- video->streaming = 0;
video->error = false;
if (video->isp->pdata->set_constraints)
@@ -1223,6 +1199,7 @@ static int isp_video_open(struct file *file)
{
struct isp_video *video = video_drvdata(file);
struct isp_video_fh *handle;
+ struct vb2_queue *queue;
int ret = 0;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
@@ -1244,9 +1221,20 @@ static int isp_video_open(struct file *file)
goto done;
}
- omap3isp_video_queue_init(&handle->queue, video->type,
- &isp_video_queue_ops, video->isp->dev,
- sizeof(struct isp_buffer));
+ queue = &handle->queue;
+ queue->type = video->type;
+ queue->io_modes = VB2_MMAP | VB2_USERPTR;
+ queue->drv_priv = handle;
+ queue->ops = &isp_video_queue_ops;
+ queue->mem_ops = &vb2_dma_contig_memops;
+ queue->buf_struct_size = sizeof(struct isp_buffer);
+ queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(&handle->queue);
+ if (ret < 0) {
+ omap3isp_put(video->isp);
+ goto done;
+ }
memset(&handle->format, 0, sizeof(handle->format));
handle->format.type = video->type;
@@ -1273,9 +1261,9 @@ static int isp_video_release(struct file *file)
/* Disable streaming and free the buffers queue resources. */
isp_video_streamoff(file, vfh, video->type);
- mutex_lock(&handle->queue.lock);
- omap3isp_video_queue_cleanup(&handle->queue);
- mutex_unlock(&handle->queue.lock);
+ mutex_lock(&video->queue_lock);
+ vb2_queue_release(&handle->queue);
+ mutex_unlock(&video->queue_lock);
omap3isp_pipeline_pm_use(&video->video.entity, 0);
@@ -1292,16 +1280,27 @@ static int isp_video_release(struct file *file)
static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
- struct isp_video_queue *queue = &vfh->queue;
+ struct isp_video *video = video_drvdata(file);
+ int ret;
- return omap3isp_video_queue_poll(queue, file, wait);
+ mutex_lock(&video->queue_lock);
+ ret = vb2_poll(&vfh->queue, file, wait);
+ mutex_unlock(&video->queue_lock);
+
+ return ret;
}
static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+ struct isp_video *video = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&video->queue_lock);
+ ret = vb2_mmap(&vfh->queue, vma);
+ mutex_unlock(&video->queue_lock);
- return omap3isp_video_queue_mmap(&vfh->queue, vma);
+ return ret;
}
static struct v4l2_file_operations isp_video_fops = {
@@ -1342,15 +1341,23 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
return -EINVAL;
}
+ video->alloc_ctx = vb2_dma_contig_init_ctx(video->isp->dev);
+ if (IS_ERR(video->alloc_ctx))
+ return PTR_ERR(video->alloc_ctx);
+
ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
- if (ret < 0)
+ if (ret < 0) {
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
return ret;
+ }
mutex_init(&video->mutex);
atomic_set(&video->active, 0);
spin_lock_init(&video->pipe.lock);
mutex_init(&video->stream_lock);
+ mutex_init(&video->queue_lock);
+ spin_lock_init(&video->irqlock);
/* Initialize the video device. */
if (video->ops == NULL)
@@ -1371,7 +1378,9 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
void omap3isp_video_cleanup(struct isp_video *video)
{
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
media_entity_cleanup(&video->video.entity);
+ mutex_destroy(&video->queue_lock);
mutex_destroy(&video->stream_lock);
mutex_destroy(&video->mutex);
}
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 4e194076cc60..7d2e82122ecd 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -30,8 +30,7 @@
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
-
-#include "ispqueue.h"
+#include <media/videobuf2-core.h>
#define ISP_VIDEO_DRIVER_NAME "ispvideo"
#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
@@ -124,17 +123,19 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
ISP_PIPELINE_IDLE_OUTPUT);
}
-/*
- * struct isp_buffer - ISP buffer
- * @buffer: ISP video buffer
- * @isp_addr: MMU mapped address (a.k.a. device address) of the buffer.
+/**
+ * struct isp_buffer - ISP video buffer
+ * @vb: videobuf2 buffer
+ * @irqlist: List head for insertion into IRQ queue
+ * @dma: DMA address
*/
struct isp_buffer {
- struct isp_video_buffer buffer;
- dma_addr_t isp_addr;
+ struct vb2_buffer vb;
+ struct list_head irqlist;
+ dma_addr_t dma;
};
-#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, buffer)
+#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, vb)
enum isp_video_dmaqueue_flags {
/* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
@@ -172,16 +173,16 @@ struct isp_video {
unsigned int bpl_value; /* bytes per line value */
unsigned int bpl_padding; /* padding at end of line */
- /* Entity video node streaming */
- unsigned int streaming:1;
-
/* Pipeline state */
struct isp_pipeline pipe;
struct mutex stream_lock; /* pipeline and stream states */
bool error;
/* Video buffers queue */
- struct isp_video_queue *queue;
+ void *alloc_ctx;
+ struct vb2_queue *queue;
+ struct mutex queue_lock; /* protects the queue */
+ spinlock_t irqlock; /* protects dmaqueue */
struct list_head dmaqueue;
enum isp_video_dmaqueue_flags dmaqueue_flags;
@@ -193,7 +194,7 @@ struct isp_video {
struct isp_video_fh {
struct v4l2_fh vfh;
struct isp_video *video;
- struct isp_video_queue queue;
+ struct vb2_queue queue;
struct v4l2_format format;
struct v4l2_fract timeperframe;
};
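
With struct vb2_buffer embedded as the first member of struct isp_buffer, vb2 callbacks recover the driver buffer through the to_isp_buffer() helper defined above. A minimal sketch (example_buf_queue is a hypothetical callback):

static void example_buf_queue(struct vb2_buffer *vb)
{
	struct isp_buffer *buffer = to_isp_buffer(vb);

	/* The driver-private fields are now reachable. */
	buffer->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
}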
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 349e659d75fb..7c4489c42365 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1200,6 +1200,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
EXPORT_SYMBOL_GPL(vb2_buffer_done);
/**
+ * vb2_discard_done() - discard all buffers marked as DONE
+ * @q: videobuf2 queue
+ *
+ * This function is intended to be used with suspend/resume operations. It
+ * discards all 'done' buffers as they would be too old to be requested after
+ * resume.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed work before calling this function to make sure no buffer will be
+ * touched by the driver and/or hardware.
+ */
+void vb2_discard_done(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ list_for_each_entry(vb, &q->done_list, done_entry)
+ vb->state = VB2_BUF_STATE_ERROR;
+ spin_unlock_irqrestore(&q->done_lock, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_discard_done);
+
+/**
* __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
* v4l2_buffer by the userspace. The caller has already verified that struct
* v4l2_buffer has a valid number of planes.
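
A hypothetical resume path using the new helper might look like this (the mydrv_* names and irq field are assumptions; the point is that the hardware and its interrupt handler must be quiesced before the call):

#include <linux/interrupt.h>
#include <media/videobuf2-core.h>

struct mydrv {				/* hypothetical driver context */
	struct vb2_queue queue;
	int irq;
};

static void mydrv_stop_hw(struct mydrv *drv);	/* assumed helpers */
static void mydrv_start_hw(struct mydrv *drv);

static int mydrv_resume(struct mydrv *drv)
{
	mydrv_stop_hw(drv);		/* no new completions after this */
	synchronize_irq(drv->irq);	/* drain the interrupt handler */
	vb2_discard_done(&drv->queue);	/* stale DONE buffers -> ERROR */
	mydrv_start_hw(drv);
	return 0;
}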
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 2a635b6fdaf7..c880ba685754 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -601,6 +601,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
pcr->slots[RTSX_MS_CARD].card_event = NULL;
msh = host->msh;
host->eject = true;
+ cancel_work_sync(&host->handle_req);
mutex_lock(&host->host_mutex);
if (host->req) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 570b18a113ff..ebc0af7d769c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1037,7 +1037,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
goto out;
/* signature to know if this mf is freed */
mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
- list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+ list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
#endif
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index dcc8385adeb3..8a050e885688 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2432,9 +2432,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
- MPIHeader_t *mpi_hdr;
unsigned long timeleft;
int retval;
+ u32 msgcontext;
/* Reset long to int. Should affect IA64 and SPARC only
*/
@@ -2581,11 +2581,11 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
}
IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
- mpi_hdr = (MPIHeader_t *) mf;
+ msgcontext = IstwiRWRequest->MsgContext;
memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
+ IstwiRWRequest->MsgContext = msgcontext;
IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
- IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
IstwiRWRequest->NumAddressBytes = 0x01;
IstwiRWRequest->DataLength = cpu_to_le16(0x04);
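
The MsgContext handling above is an instance of a simple pattern, shown here in isolation (a sketch; prepare_request is a hypothetical helper):

static void prepare_request(ToolboxIstwiReadWriteRequest_t *req)
{
	u32 msgcontext = req->MsgContext;	/* save the firmware-owned field */

	memset(req, 0, sizeof(*req));		/* zero the whole request frame */
	req->MsgContext = msgcontext;		/* restore before filling the rest */
}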
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index fd75108c355e..02a3eefd6931 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -649,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
}
static int
-mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
struct mptfc_rport_info *ri;
struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
@@ -658,14 +658,14 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
- done(SCpnt);
+ SCpnt->scsi_done(SCpnt);
return 0;
}
err = fc_remote_port_chkready(rport);
if (unlikely(err)) {
SCpnt->result = err;
- done(SCpnt);
+ SCpnt->scsi_done(SCpnt);
return 0;
}
@@ -673,15 +673,13 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (unlikely(!ri)) {
SCpnt->result = DID_IMM_RETRY << 16;
- done(SCpnt);
+ SCpnt->scsi_done(SCpnt);
return 0;
}
- return mptscsih_qcmd(SCpnt,done);
+ return mptscsih_qcmd(SCpnt);
}
-static DEF_SCSI_QCMD(mptfc_qcmd)
-
/*
* mptfc_display_port_link_speed - displaying link speed
* @ioc: Pointer to MPT_ADAPTER structure
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 00d339c361fc..711fcb5cec87 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1896,7 +1896,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
}
static int
-mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
@@ -1904,11 +1904,11 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
SCpnt->result = DID_NO_CONNECT << 16;
- done(SCpnt);
+ SCpnt->scsi_done(SCpnt);
return 0;
}
- hd = shost_priv(SCpnt->device->host);
+ hd = shost_priv(shost);
ioc = hd->ioc;
if (ioc->sas_discovery_quiesce_io)
@@ -1917,11 +1917,9 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
if (ioc->debug_level & MPT_DEBUG_SCSI)
scsi_print_command(SCpnt);
- return mptscsih_qcmd(SCpnt,done);
+ return mptscsih_qcmd(SCpnt);
}
-static DEF_SCSI_QCMD(mptsas_qcmd)
-
/**
* mptsas_eh_timed_out - resets the scsi_cmnd timeout
* if the device under question is currently in the
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 727819cc7034..2a1c6f21af27 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1304,7 +1304,6 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
/**
* mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
* @SCpnt: Pointer to scsi_cmnd structure
- * @done: Pointer SCSI mid-layer IO completion function
*
* (linux scsi_host_template.queuecommand routine)
* This is the primary SCSI IO start routine. Create a MPI SCSIIORequest
@@ -1313,7 +1312,7 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
* Returns 0. (rtn value discarded by linux scsi mid-layer)
*/
int
-mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptscsih_qcmd(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
@@ -1329,10 +1328,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
- SCpnt->scsi_done = done;
- dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
- ioc->name, SCpnt, done));
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p\n",
+ ioc->name, SCpnt));
if (ioc->taskmgmt_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
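
A minimal sketch (illustration, not part of the patch) of the unlocked queuecommand style the fusion drivers convert to: the handler receives the Scsi_Host directly, completes early failures through SCpnt->scsi_done(), and no longer needs the DEF_SCSI_QCMD() wrapper since the midlayer calls it without the host lock. All mydrv_* names are hypothetical:

struct mydrv_host;
static int mydrv_target_ready(struct mydrv_host *hd,
			      struct scsi_device *sdev);
static int mydrv_issue(struct mydrv_host *hd, struct scsi_cmnd *SCpnt);

static int mydrv_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
	struct mydrv_host *hd = shost_priv(shost);

	if (!mydrv_target_ready(hd, SCpnt->device)) {
		SCpnt->result = DID_NO_CONNECT << 16;
		SCpnt->scsi_done(SCpnt);	/* complete without queueing */
		return 0;
	}

	return mydrv_issue(hd, SCpnt);
}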
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 83f503162f7a..99e3390807f3 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -113,7 +113,7 @@ extern int mptscsih_resume(struct pci_dev *pdev);
#endif
extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
-extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
u8 id, int lun, int ctx2abort, ulong timeout);
extern void mptscsih_slave_destroy(struct scsi_device *device);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 5653e505f91f..49d11338294b 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -780,33 +780,31 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
}
static int
-mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
- struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host);
+ struct _MPT_SCSI_HOST *hd = shost_priv(shost);
VirtDevice *vdevice = SCpnt->device->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
- done(SCpnt);
+ SCpnt->scsi_done(SCpnt);
return 0;
}
if (SCpnt->device->channel == 1 &&
mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
SCpnt->result = DID_NO_CONNECT << 16;
- done(SCpnt);
+ SCpnt->scsi_done(SCpnt);
return 0;
}
if (spi_dv_pending(scsi_target(SCpnt->device)))
ddvprintk(ioc, scsi_print_command(SCpnt));
- return mptscsih_qcmd(SCpnt,done);
+ return mptscsih_qcmd(SCpnt);
}
-static DEF_SCSI_QCMD(mptspi_qcmd)
-
static void mptspi_slave_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ee8204cc31e9..6cc4b6acc22a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -760,6 +760,7 @@ config MFD_SYSCON
config MFD_DAVINCI_VOICECODEC
tristate
select MFD_CORE
+ select REGMAP_MMIO
config MFD_TI_AM335X_TSCADC
tristate "TI ADC / Touch Screen chip support"
@@ -1225,7 +1226,7 @@ config MFD_WM8994
functionality of the device other drivers must be enabled.
config MFD_STW481X
- bool "Support for ST Microelectronics STw481x"
+ tristate "Support for ST Microelectronics STw481x"
depends on I2C && ARCH_NOMADIK
select REGMAP_I2C
select MFD_CORE
@@ -1248,7 +1249,7 @@ config MCP_SA11X0
# Chip drivers
config MCP_UCB1200
- bool "Support for UCB1200 / UCB1300"
+ tristate "Support for UCB1200 / UCB1300"
depends on MCP_SA11X0
select MCP
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index a8ee4a36a1d8..cf2e6a198c6b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -591,7 +591,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
num_irqs = AB8500_NR_IRQS;
/* If ->irq_base is zero this will give a linear mapping */
- ab8500->domain = irq_domain_add_simple(NULL,
+ ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node,
num_irqs, 0,
&ab8500_irq_ops, ab8500);
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 96162b62f3c0..3bc969a5916b 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -29,13 +29,21 @@
#include <linux/i2c/twl.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/mach-types.h>
static u8 twl4030_start_script_address = 0x2b;
-#define PWR_P1_SW_EVENTS 0x10
-#define PWR_DEVOFF (1 << 0)
+/* Register bits for P1, P2 and P3_SW_EVENTS */
+#define PWR_STOPON_PRWON BIT(6)
+#define PWR_STOPON_SYSEN BIT(5)
+#define PWR_ENABLE_WARMRESET BIT(4)
+#define PWR_LVL_WAKEUP BIT(3)
+#define PWR_DEVACT BIT(2)
+#define PWR_DEVSLP BIT(1)
+#define PWR_DEVOFF BIT(0)
+
#define SEQ_OFFSYNC (1 << 0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
@@ -52,10 +60,6 @@ static u8 twl4030_start_script_address = 0x2b;
#define R_CFG_P2_TRANSITION PHY_TO_OFF_PM_MASTER(0x37)
#define R_CFG_P3_TRANSITION PHY_TO_OFF_PM_MASTER(0x38)
-#define LVL_WAKEUP 0x08
-
-#define ENABLE_WARMRESET (1<<4)
-
#define END_OF_SCRIPT 0x3f
#define R_SEQ_ADD_A2S PHY_TO_OFF_PM_MASTER(0x55)
@@ -125,6 +129,53 @@ static u8 res_config_addrs[] = {
[RES_MAIN_REF] = 0x94,
};
+/*
+ * Usable values for .remap_sleep and .remap_off
+ * Based on table "5.3.3 Resource Operating modes"
+ */
+enum {
+ TWL_REMAP_OFF = 0,
+ TWL_REMAP_SLEEP = 8,
+ TWL_REMAP_ACTIVE = 9,
+};
+
+/*
+ * Macros to configure the PM register states for various resources.
+ * Note that we can make MSG_SINGULAR etc private to this driver once
+ * omap3 has been made DT only.
+ */
+#define TWL_DFLT_DELAY 2 /* typically two 32 KiHz cycles */
+#define TWL_DEV_GRP_P123 (DEV_GRP_P1 | DEV_GRP_P2 | DEV_GRP_P3)
+#define TWL_RESOURCE_SET(res, state) \
+ { MSG_SINGULAR(DEV_GRP_NULL, (res), (state)), TWL_DFLT_DELAY }
+#define TWL_RESOURCE_ON(res) TWL_RESOURCE_SET(res, RES_STATE_ACTIVE)
+#define TWL_RESOURCE_OFF(res) TWL_RESOURCE_SET(res, RES_STATE_OFF)
+#define TWL_RESOURCE_RESET(res) TWL_RESOURCE_SET(res, RES_STATE_WRST)
+/*
+ * It seems that type1 and type2 are just the resource init order
+ * numbers for the type1 and type2 groups.
+ */
+#define TWL_RESOURCE_SET_ACTIVE(res, state) \
+ { MSG_SINGULAR(DEV_GRP_NULL, (res), RES_STATE_ACTIVE), (state) }
+#define TWL_RESOURCE_GROUP_RESET(group, type1, type2) \
+ { MSG_BROADCAST(DEV_GRP_NULL, (group), (type1), (type2), \
+ RES_STATE_WRST), TWL_DFLT_DELAY }
+#define TWL_RESOURCE_GROUP_SLEEP(group, type, type2) \
+ { MSG_BROADCAST(DEV_GRP_NULL, (group), (type), (type2), \
+ RES_STATE_SLEEP), TWL_DFLT_DELAY }
+#define TWL_RESOURCE_GROUP_ACTIVE(group, type, type2) \
+ { MSG_BROADCAST(DEV_GRP_NULL, (group), (type), (type2), \
+ RES_STATE_ACTIVE), TWL_DFLT_DELAY }
+#define TWL_REMAP_SLEEP(res, devgrp, typ, typ2) \
+ { .resource = (res), .devgroup = (devgrp), \
+ .type = (typ), .type2 = (typ2), \
+ .remap_off = TWL_REMAP_OFF, \
+ .remap_sleep = TWL_REMAP_SLEEP, }
+#define TWL_REMAP_OFF(res, devgrp, typ, typ2) \
+ { .resource = (res), .devgroup = (devgrp), \
+ .type = (typ), .type2 = (typ2), \
+ .remap_off = TWL_REMAP_OFF, .remap_sleep = TWL_REMAP_OFF, }
+
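
To make the helpers above concrete, a single script entry expands as follows (an illustration derived from the definitions, not new code):

/*
 * TWL_RESOURCE_ON(RES_RESET)
 *	== TWL_RESOURCE_SET(RES_RESET, RES_STATE_ACTIVE)
 *	== { MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE),
 *	     TWL_DFLT_DELAY }
 *
 * i.e. one singular power-bus message driving RES_RESET to its active
 * state, followed by the default delay of two 32 KiHz cycles.
 */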
static int twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
@@ -196,7 +247,7 @@ static int twl4030_config_wakeup3_sequence(u8 address)
err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P3_SW_EVENTS);
if (err)
goto out;
- data |= LVL_WAKEUP;
+ data |= PWR_LVL_WAKEUP;
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P3_SW_EVENTS);
out:
if (err)
@@ -219,7 +270,7 @@ static int twl4030_config_wakeup12_sequence(u8 address)
if (err)
goto out;
- data |= LVL_WAKEUP;
+ data |= PWR_LVL_WAKEUP;
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P1_SW_EVENTS);
if (err)
goto out;
@@ -228,7 +279,7 @@ static int twl4030_config_wakeup12_sequence(u8 address)
if (err)
goto out;
- data |= LVL_WAKEUP;
+ data |= PWR_LVL_WAKEUP;
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P2_SW_EVENTS);
if (err)
goto out;
@@ -281,7 +332,7 @@ static int twl4030_config_warmreset_sequence(u8 address)
if (err)
goto out;
- rd_data |= ENABLE_WARMRESET;
+ rd_data |= PWR_ENABLE_WARMRESET;
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
@@ -290,7 +341,7 @@ static int twl4030_config_warmreset_sequence(u8 address)
if (err)
goto out;
- rd_data |= ENABLE_WARMRESET;
+ rd_data |= PWR_ENABLE_WARMRESET;
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
@@ -299,7 +350,7 @@ static int twl4030_config_warmreset_sequence(u8 address)
if (err)
goto out;
- rd_data |= ENABLE_WARMRESET;
+ rd_data |= PWR_ENABLE_WARMRESET;
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P3_SW_EVENTS);
out:
if (err)
@@ -421,6 +472,12 @@ static int load_twl4030_script(struct twl4030_script *tscript,
goto out;
}
if (tscript->flags & TWL4030_WAKEUP12_SCRIPT) {
+ /* Reset any existing sleep script to avoid hangs on reboot */
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_A2S);
+ if (err)
+ goto out;
+
err = twl4030_config_wakeup12_sequence(address);
if (err)
goto out;
@@ -493,7 +550,8 @@ int twl4030_remove_script(u8 flags)
return err;
}
-static int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
+static int
+twl4030_power_configure_scripts(const struct twl4030_power_data *pdata)
{
int err;
int i;
@@ -509,12 +567,34 @@ static int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
return 0;
}
-static int twl4030_power_configure_resources(struct twl4030_power_data *pdata)
+static void twl4030_patch_rconfig(struct twl4030_resconfig *common,
+ struct twl4030_resconfig *board)
+{
+ while (common->resource) {
+ struct twl4030_resconfig *b = board;
+
+ while (b->resource) {
+ if (b->resource == common->resource) {
+ *common = *b;
+ break;
+ }
+ b++;
+ }
+ common++;
+ }
+}
+
+static int
+twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
{
struct twl4030_resconfig *resconfig = pdata->resource_config;
+ struct twl4030_resconfig *boardconf = pdata->board_config;
int err;
if (resconfig) {
+ if (boardconf)
+ twl4030_patch_rconfig(resconfig, boardconf);
+
while (resconfig->resource) {
err = twl4030_configure_resource(resconfig);
if (err)
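
A board_config table overriding one of the common entries might look like this (a hypothetical example; twl4030_patch_rconfig() replaces the matching common entry wholesale, so only the overridden resources need listing):

static struct twl4030_resconfig my_board_rconfig[] = {
	/* Keep VAUX1 in the P1 device group instead of leaving it unused */
	TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_P1, 0, 0),
	{ /* Terminator */ },
};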
@@ -541,7 +621,7 @@ void twl4030_power_off(void)
pr_err("TWL4030 Unable to power off\n");
}
-static bool twl4030_power_use_poweroff(struct twl4030_power_data *pdata,
+static bool twl4030_power_use_poweroff(const struct twl4030_power_data *pdata,
struct device_node *node)
{
if (pdata && pdata->use_poweroff)
@@ -553,10 +633,170 @@ static bool twl4030_power_use_poweroff(struct twl4030_power_data *pdata,
return false;
}
+#ifdef CONFIG_OF
+
+/* Generic warm reset configuration for omap3 */
+
+static struct twl4030_ins omap3_wrst_seq[] = {
+ TWL_RESOURCE_OFF(RES_NRES_PWRON),
+ TWL_RESOURCE_OFF(RES_RESET),
+ TWL_RESOURCE_RESET(RES_MAIN_REF),
+ TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2),
+ TWL_RESOURCE_RESET(RES_VUSB_3V1),
+ TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R1),
+ TWL_RESOURCE_GROUP_RESET(RES_GRP_RC, RES_TYPE_ALL, RES_TYPE2_R0),
+ TWL_RESOURCE_ON(RES_RESET),
+ TWL_RESOURCE_ON(RES_NRES_PWRON),
+};
+
+static struct twl4030_script omap3_wrst_script = {
+ .script = omap3_wrst_seq,
+ .size = ARRAY_SIZE(omap3_wrst_seq),
+ .flags = TWL4030_WRST_SCRIPT,
+};
+
+static struct twl4030_script *omap3_reset_scripts[] = {
+ &omap3_wrst_script,
+};
+
+static struct twl4030_resconfig omap3_rconfig[] = {
+ TWL_REMAP_SLEEP(RES_HFCLKOUT, DEV_GRP_P3, -1, -1),
+ TWL_REMAP_SLEEP(RES_VDD1, DEV_GRP_P1, -1, -1),
+ TWL_REMAP_SLEEP(RES_VDD2, DEV_GRP_P1, -1, -1),
+ { 0, 0 },
+};
+
+static struct twl4030_power_data omap3_reset = {
+ .scripts = omap3_reset_scripts,
+ .num = ARRAY_SIZE(omap3_reset_scripts),
+ .resource_config = omap3_rconfig,
+};
+
+/* Recommended generic default idle configuration for off-idle */
+
+/* Broadcast message to put res to sleep */
+static struct twl4030_ins omap3_idle_sleep_on_seq[] = {
+ TWL_RESOURCE_GROUP_SLEEP(RES_GRP_ALL, RES_TYPE_ALL, 0),
+};
+
+static struct twl4030_script omap3_idle_sleep_on_script = {
+ .script = omap3_idle_sleep_on_seq,
+ .size = ARRAY_SIZE(omap3_idle_sleep_on_seq),
+ .flags = TWL4030_SLEEP_SCRIPT,
+};
+
+/* Broadcast message to put res to active */
+static struct twl4030_ins omap3_idle_wakeup_p12_seq[] = {
+ TWL_RESOURCE_GROUP_ACTIVE(RES_GRP_ALL, RES_TYPE_ALL, 0),
+};
+
+static struct twl4030_script omap3_idle_wakeup_p12_script = {
+ .script = omap3_idle_wakeup_p12_seq,
+ .size = ARRAY_SIZE(omap3_idle_wakeup_p12_seq),
+ .flags = TWL4030_WAKEUP12_SCRIPT,
+};
+
+/* Broadcast message to put res to active */
+static struct twl4030_ins omap3_idle_wakeup_p3_seq[] = {
+ TWL_RESOURCE_SET_ACTIVE(RES_CLKEN, 0x37),
+ TWL_RESOURCE_GROUP_ACTIVE(RES_GRP_ALL, RES_TYPE_ALL, 0),
+};
+
+static struct twl4030_script omap3_idle_wakeup_p3_script = {
+ .script = omap3_idle_wakeup_p3_seq,
+ .size = ARRAY_SIZE(omap3_idle_wakeup_p3_seq),
+ .flags = TWL4030_WAKEUP3_SCRIPT,
+};
+
+static struct twl4030_script *omap3_idle_scripts[] = {
+ &omap3_idle_wakeup_p12_script,
+ &omap3_idle_wakeup_p3_script,
+ &omap3_wrst_script,
+ &omap3_idle_sleep_on_script,
+};
+
+/*
+ * Recommended configuration based on "Recommended Sleep
+ * Sequences for the Zoom Platform":
+ * http://omappedia.com/wiki/File:Recommended_Sleep_Sequences_Zoom.pdf
+ * Note that type1 and type2 seem to be just the init order numbers
+ * for the type1 and type2 groups as specified in the document mentioned
+ * above.
+ */
+static struct twl4030_resconfig omap3_idle_rconfig[] = {
+ TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VAUX2, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VAUX3, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VAUX4, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VMMC1, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VMMC2, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_OFF(RES_VPLL1, DEV_GRP_P1, 3, 1),
+ TWL_REMAP_SLEEP(RES_VPLL2, DEV_GRP_P1, 0, 0),
+ TWL_REMAP_SLEEP(RES_VSIM, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VDAC, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VINTANA1, TWL_DEV_GRP_P123, 1, 2),
+ TWL_REMAP_SLEEP(RES_VINTANA2, TWL_DEV_GRP_P123, 0, 2),
+ TWL_REMAP_SLEEP(RES_VINTDIG, TWL_DEV_GRP_P123, 1, 2),
+ TWL_REMAP_SLEEP(RES_VIO, TWL_DEV_GRP_P123, 2, 2),
+ TWL_REMAP_OFF(RES_VDD1, DEV_GRP_P1, 4, 1),
+ TWL_REMAP_OFF(RES_VDD2, DEV_GRP_P1, 3, 1),
+ TWL_REMAP_SLEEP(RES_VUSB_1V5, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VUSB_1V8, DEV_GRP_NULL, 0, 0),
+ TWL_REMAP_SLEEP(RES_VUSB_3V1, TWL_DEV_GRP_P123, 0, 0),
+ /* Resource #20 USB charge pump skipped */
+ TWL_REMAP_SLEEP(RES_REGEN, TWL_DEV_GRP_P123, 2, 1),
+ TWL_REMAP_SLEEP(RES_NRES_PWRON, TWL_DEV_GRP_P123, 0, 1),
+ TWL_REMAP_SLEEP(RES_CLKEN, TWL_DEV_GRP_P123, 3, 2),
+ TWL_REMAP_SLEEP(RES_SYSEN, TWL_DEV_GRP_P123, 6, 1),
+ TWL_REMAP_SLEEP(RES_HFCLKOUT, DEV_GRP_P3, 0, 2),
+ TWL_REMAP_SLEEP(RES_32KCLKOUT, TWL_DEV_GRP_P123, 0, 0),
+ TWL_REMAP_SLEEP(RES_RESET, TWL_DEV_GRP_P123, 6, 0),
+ TWL_REMAP_SLEEP(RES_MAIN_REF, TWL_DEV_GRP_P123, 0, 0),
+ { /* Terminator */ },
+};
+
+static struct twl4030_power_data omap3_idle = {
+ .scripts = omap3_idle_scripts,
+ .num = ARRAY_SIZE(omap3_idle_scripts),
+ .resource_config = omap3_idle_rconfig,
+};
+
+/* Disable 32 KiHz oscillator during idle */
+static struct twl4030_resconfig osc_off_rconfig[] = {
+ TWL_REMAP_OFF(RES_CLKEN, DEV_GRP_P1 | DEV_GRP_P3, 3, 2),
+ { /* Terminator */ },
+};
+
+static struct twl4030_power_data osc_off_idle = {
+ .scripts = omap3_idle_scripts,
+ .num = ARRAY_SIZE(omap3_idle_scripts),
+ .resource_config = omap3_idle_rconfig,
+ .board_config = osc_off_rconfig,
+};
+
+static struct of_device_id twl4030_power_of_match[] = {
+ {
+ .compatible = "ti,twl4030-power-reset",
+ .data = &omap3_reset,
+ },
+ {
+ .compatible = "ti,twl4030-power-idle",
+ .data = &omap3_idle,
+ },
+ {
+ .compatible = "ti,twl4030-power-idle-osc-off",
+ .data = &osc_off_idle,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl4030_power_of_match);
+#endif /* CONFIG_OF */
+
static int twl4030_power_probe(struct platform_device *pdev)
{
- struct twl4030_power_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct twl4030_power_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
int err = 0;
int err2 = 0;
u8 val;
@@ -577,8 +817,12 @@ static int twl4030_power_probe(struct platform_device *pdev)
return err;
}
+ match = of_match_device(of_match_ptr(twl4030_power_of_match),
+ &pdev->dev);
+ if (match && match->data)
+ pdata = match->data;
+
if (pdata) {
- /* TODO: convert to device tree */
err = twl4030_power_configure_scripts(pdata);
if (err) {
pr_err("TWL4030 failed to load scripts\n");
@@ -628,14 +872,6 @@ static int twl4030_power_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id twl4030_power_of_match[] = {
- {.compatible = "ti,twl4030-power", },
- { },
-};
-MODULE_DEVICE_TABLE(of, twl4030_power_of_match);
-#endif
-
static struct platform_driver twl4030_power_driver = {
.driver = {
.name = "twl4030_power",
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a43d0c467274..ee9402324a23 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -54,7 +54,7 @@ config AD525X_DPOT_SPI
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on HAVE_CLK
- depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45
+ depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
This option enables device driver support for the PWM channels
on certain Atmel processors. Pulse Width Modulation is used for
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 73068e50e56d..3250fc1df0aa 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -199,7 +199,7 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
GFP_KERNEL);
if (!func)
- return NULL;
+ return ERR_PTR(-ENOMEM);
func->syscfg = syscfg;
func->num_templates = num;
@@ -231,10 +231,14 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
func->regmap = regmap_init(dev, NULL, func,
&vexpress_syscfg_regmap_config);
- if (IS_ERR(func->regmap))
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+
kfree(func);
- else
- list_add(&func->list, &syscfg->funcs);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
return func->regmap;
}
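
With this hunk, vexpress_syscfg_regmap_init() reports failure through the kernel's ERR_PTR convention instead of returning NULL. A minimal caller-side sketch of that convention (the helper's second parameter is elided in the hunk above, so "context" here is a placeholder):

	struct regmap *map = vexpress_syscfg_regmap_init(dev, context);

	if (IS_ERR(map))		/* NULL is no longer a possible return */
		return PTR_ERR(map);	/* -ENOMEM, or the regmap_init() error */
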
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2421835d5daf..191617492181 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -17,7 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
+ * Philip Moltmann <moltmann@vmware.com>
*/
/*
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 824644875d41..d2dbf02022bd 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -341,16 +341,17 @@ int mmc_add_card(struct mmc_card *card)
if (mmc_host_is_spi(card->host)) {
pr_info("%s: new %s%s%s card on SPI\n",
mmc_hostname(card->host),
- mmc_card_highspeed(card) ? "high speed " : "",
- mmc_card_ddr_mode(card) ? "DDR " : "",
+ mmc_card_hs(card) ? "high speed " : "",
+ mmc_card_ddr52(card) ? "DDR " : "",
type);
} else {
pr_info("%s: new %s%s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_uhs(card) ? "ultra high speed " :
- (mmc_card_highspeed(card) ? "high speed " : ""),
+ (mmc_card_hs(card) ? "high speed " : ""),
+ mmc_card_hs400(card) ? "HS400 " :
(mmc_card_hs200(card) ? "HS200 " : ""),
- mmc_card_ddr_mode(card) ? "DDR " : "",
+ mmc_card_ddr52(card) ? "DDR " : "",
uhs_bus_speed_mode, type, card->rca);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index acbc3f2aaaf9..7dc0c85fdb60 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -800,6 +800,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_ns = limit_us * 1000;
data->timeout_clks = 0;
}
+
+	/* fall back to the limit value if the computed timeout is invalid */
+ if (timeout_us == 0)
+ data->timeout_ns = limit_us * 1000;
}
/*
@@ -1310,31 +1314,38 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+#endif /* CONFIG_REGULATOR */
+
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
- struct regulator *supply;
int ret;
- supply = devm_regulator_get(dev, "vmmc");
- mmc->supply.vmmc = supply;
+ mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
- if (IS_ERR(supply))
- return PTR_ERR(supply);
+ if (IS_ERR(mmc->supply.vmmc)) {
+ if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No vmmc regulator found\n");
+ } else {
+ ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
+ }
- ret = mmc_regulator_get_ocrmask(supply);
- if (ret > 0)
- mmc->ocr_avail = ret;
- else
- dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No vqmmc regulator found\n");
+ }
return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
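
Now that the helper lives outside #ifdef CONFIG_REGULATOR and treats missing supplies as non-fatal, a host driver's probe only has to handle deferral. A minimal sketch, assuming a probe routine with a struct mmc_host *mmc in hand:

	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		return ret;	/* a supply exists but its driver is not bound yet */

	/* Otherwise ret is 0: absent vmmc/vqmmc leave ERR_PTR() values in
	 * mmc->supply.*, and mmc->ocr_avail keeps its previous mask. */
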
-#endif /* CONFIG_REGULATOR */
-
/*
* Mask off any voltages we don't support and select
* the lowest voltage
@@ -1533,8 +1544,13 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
- /* Set signal voltage to 3.3V */
- __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
+	/* Try to set signal voltage to 3.3V, but fall back to 1.8V or 1.2V */
+ if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
+ else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
+ else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
/*
* This delay should be sufficient to allow the power supply
@@ -2183,7 +2199,7 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {0};
- if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
+ if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
return 0;
cmd.opcode = MMC_SET_BLOCKLEN;
@@ -2263,7 +2279,6 @@ static int mmc_do_hw_reset(struct mmc_host *host, int check)
}
}
- host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
if (mmc_host_is_spi(host)) {
host->ios.chip_select = MMC_CS_HIGH;
host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
@@ -2403,6 +2418,11 @@ void mmc_rescan(struct work_struct *work)
container_of(work, struct mmc_host, detect.work);
int i;
+ if (host->trigger_card_event && host->ops->card_event) {
+ host->ops->card_event(host);
+ host->trigger_card_event = false;
+ }
+
if (host->rescan_disable)
return;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 54829c0ed000..91eb16223246 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -135,8 +135,14 @@ static int mmc_ios_show(struct seq_file *s, void *data)
case MMC_TIMING_UHS_DDR50:
str = "sd uhs DDR50";
break;
+ case MMC_TIMING_MMC_DDR52:
+ str = "mmc DDR52";
+ break;
case MMC_TIMING_MMC_HS200:
- str = "mmc high-speed SDR200";
+ str = "mmc HS200";
+ break;
+ case MMC_TIMING_MMC_HS400:
+ str = "mmc HS400";
break;
default:
str = "invalid";
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index fdea825dbb24..95cceae96944 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -447,6 +447,10 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
if (of_find_property(np, "mmc-hs200-1_2v", &len))
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ if (of_find_property(np, "mmc-hs400-1_8v", &len))
+ host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+ if (of_find_property(np, "mmc-hs400-1_2v", &len))
+ host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
return 0;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 1ab5f3a0af5b..793c6f7ddb04 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -240,31 +240,62 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
static void mmc_select_card_type(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+ u8 card_type = card->ext_csd.raw_card_type;
u32 caps = host->caps, caps2 = host->caps2;
- unsigned int hs_max_dtr = 0;
+ unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
+ unsigned int avail_type = 0;
- if (card_type & EXT_CSD_CARD_TYPE_26)
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_HS_26) {
hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS_26;
+ }
if (caps & MMC_CAP_MMC_HIGHSPEED &&
- card_type & EXT_CSD_CARD_TYPE_52)
+ card_type & EXT_CSD_CARD_TYPE_HS_52) {
hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS_52;
+ }
+
+ if (caps & MMC_CAP_1_8V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
+ }
- if ((caps & MMC_CAP_1_8V_DDR &&
- card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
- (caps & MMC_CAP_1_2V_DDR &&
- card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+ if (caps & MMC_CAP_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
+ }
+
+ if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
+ }
+
+ if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
+ }
+
+ if (caps2 & MMC_CAP2_HS400_1_8V &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
+ }
- if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
- card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
- (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
- card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
- hs_max_dtr = MMC_HS200_MAX_DTR;
+ if (caps2 & MMC_CAP2_HS400_1_2V &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
+ }
card->ext_csd.hs_max_dtr = hs_max_dtr;
- card->ext_csd.card_type = card_type;
+ card->ext_csd.hs200_max_dtr = hs200_max_dtr;
+ card->mmc_avail_type = avail_type;
}
/*
@@ -480,6 +511,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
card->ext_csd.raw_pwr_cl_ddr_52_360 =
ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
+ card->ext_csd.raw_pwr_cl_ddr_200_360 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
}
if (card->ext_csd.rev >= 5) {
@@ -646,7 +679,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
- bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]));
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
+
if (err)
err = -EINVAL;
@@ -694,18 +730,10 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_rel_sectors.attr,
NULL,
};
-
-static struct attribute_group mmc_std_attr_group = {
- .attrs = mmc_std_attrs,
-};
-
-static const struct attribute_group *mmc_attr_groups[] = {
- &mmc_std_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(mmc_std);
static struct device_type mmc_type = {
- .groups = mmc_attr_groups,
+ .groups = mmc_std_groups,
};
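
ATTRIBUTE_GROUPS() is the sysfs helper macro that generates the boilerplate deleted above; it expands to roughly:

	static const struct attribute_group mmc_std_group = {
		.attrs = mmc_std_attrs,
	};

	static const struct attribute_group *mmc_std_groups[] = {
		&mmc_std_group,
		NULL,
	};
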
/*
@@ -714,17 +742,13 @@ static struct device_type mmc_type = {
* extended CSD register, select it by executing the
* mmc_switch command.
*/
-static int mmc_select_powerclass(struct mmc_card *card,
- unsigned int bus_width)
+static int __mmc_select_powerclass(struct mmc_card *card,
+ unsigned int bus_width)
{
- int err = 0;
+ struct mmc_host *host = card->host;
+ struct mmc_ext_csd *ext_csd = &card->ext_csd;
unsigned int pwrclass_val = 0;
- struct mmc_host *host;
-
- BUG_ON(!card);
-
- host = card->host;
- BUG_ON(!host);
+ int err = 0;
/* Power class selection is supported for versions >= 4.0 */
if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
@@ -736,14 +760,14 @@ static int mmc_select_powerclass(struct mmc_card *card,
switch (1 << host->ios.vdd) {
case MMC_VDD_165_195:
- if (host->ios.clock <= 26000000)
- pwrclass_val = card->ext_csd.raw_pwr_cl_26_195;
- else if (host->ios.clock <= 52000000)
+ if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_26_195;
+ else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
- card->ext_csd.raw_pwr_cl_52_195 :
- card->ext_csd.raw_pwr_cl_ddr_52_195;
- else if (host->ios.clock <= 200000000)
- pwrclass_val = card->ext_csd.raw_pwr_cl_200_195;
+ ext_csd->raw_pwr_cl_52_195 :
+ ext_csd->raw_pwr_cl_ddr_52_195;
+ else if (host->ios.clock <= MMC_HS200_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_200_195;
break;
case MMC_VDD_27_28:
case MMC_VDD_28_29:
@@ -754,14 +778,16 @@ static int mmc_select_powerclass(struct mmc_card *card,
case MMC_VDD_33_34:
case MMC_VDD_34_35:
case MMC_VDD_35_36:
- if (host->ios.clock <= 26000000)
- pwrclass_val = card->ext_csd.raw_pwr_cl_26_360;
- else if (host->ios.clock <= 52000000)
+ if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_26_360;
+ else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
- card->ext_csd.raw_pwr_cl_52_360 :
- card->ext_csd.raw_pwr_cl_ddr_52_360;
- else if (host->ios.clock <= 200000000)
- pwrclass_val = card->ext_csd.raw_pwr_cl_200_360;
+ ext_csd->raw_pwr_cl_52_360 :
+ ext_csd->raw_pwr_cl_ddr_52_360;
+ else if (host->ios.clock <= MMC_HS200_MAX_DTR)
+ pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_ddr_200_360 :
+ ext_csd->raw_pwr_cl_200_360;
break;
default:
pr_warning("%s: Voltage range not supported "
@@ -787,40 +813,79 @@ static int mmc_select_powerclass(struct mmc_card *card,
return err;
}
+static int mmc_select_powerclass(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 bus_width, ext_csd_bits;
+ int err, ddr;
+
+ /* Power class selection is supported for versions >= 4.0 */
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ return 0;
+
+ bus_width = host->ios.bus_width;
+ /* Power class values are defined only for 4/8 bit bus */
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
+ if (ddr)
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
+ else
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+
+ err = __mmc_select_powerclass(card, ext_csd_bits);
+ if (err)
+ pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
+ mmc_hostname(host), 1 << bus_width, ddr);
+
+ return err;
+}
+
/*
- * Selects the desired buswidth and switch to the HS200 mode
- * if bus width set without error
+ * Set the bus speed for the selected speed mode.
*/
-static int mmc_select_hs200(struct mmc_card *card)
+static void mmc_set_bus_speed(struct mmc_card *card)
+{
+ unsigned int max_dtr = (unsigned int)-1;
+
+ if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
+ max_dtr > card->ext_csd.hs200_max_dtr)
+ max_dtr = card->ext_csd.hs200_max_dtr;
+ else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
+ max_dtr = card->ext_csd.hs_max_dtr;
+ else if (max_dtr > card->csd.max_dtr)
+ max_dtr = card->csd.max_dtr;
+
+ mmc_set_clock(card->host, max_dtr);
+}
+
+/*
+ * Select the bus width among 4-bit and 8-bit (SDR).
+ * If the bus width is changed successfully, return the selected width value.
+ * Zero is returned instead of an error value if the wide width is not supported.
+ */
+static int mmc_select_bus_width(struct mmc_card *card)
{
- int idx, err = -EINVAL;
- struct mmc_host *host;
static unsigned ext_csd_bits[] = {
- EXT_CSD_BUS_WIDTH_4,
EXT_CSD_BUS_WIDTH_8,
+ EXT_CSD_BUS_WIDTH_4,
};
static unsigned bus_widths[] = {
- MMC_BUS_WIDTH_4,
MMC_BUS_WIDTH_8,
+ MMC_BUS_WIDTH_4,
};
+ struct mmc_host *host = card->host;
+ unsigned idx, bus_width = 0;
+ int err = 0;
- BUG_ON(!card);
-
- host = card->host;
-
- if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
- host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
-
- if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
- host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
-
- /* If fails try again during next card power cycle */
- if (err)
- goto err;
+ if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) &&
+ !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
+ return 0;
- idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;
+ idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
/*
* Unlike SD, MMC cards dont have a configuration register to notify
@@ -828,8 +893,7 @@ static int mmc_select_hs200(struct mmc_card *card)
* the supported bus width or compare the ext csd values of current
* bus width and ext csd values of 1 bit mode read earlier.
*/
- for (; idx >= 0; idx--) {
-
+ for (; idx < ARRAY_SIZE(bus_widths); idx++) {
/*
* Host is capable of 8bit transfer, then switch
* the device to work in 8bit transfer mode. If the
@@ -844,27 +908,266 @@ static int mmc_select_hs200(struct mmc_card *card)
if (err)
continue;
- mmc_set_bus_width(card->host, bus_widths[idx]);
+ bus_width = bus_widths[idx];
+ mmc_set_bus_width(host, bus_width);
+ /*
+ * If controller can't handle bus width test,
+ * compare ext_csd previously read in 1 bit mode
+ * against ext_csd at new bus width
+ */
if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
- err = mmc_compare_ext_csds(card, bus_widths[idx]);
+ err = mmc_compare_ext_csds(card, bus_width);
else
- err = mmc_bus_test(card, bus_widths[idx]);
- if (!err)
+ err = mmc_bus_test(card, bus_width);
+
+ if (!err) {
+ err = bus_width;
break;
+ } else {
+ pr_warn("%s: switch to bus width %d failed\n",
+ mmc_hostname(host), ext_csd_bits[idx]);
+ }
}
- /* switch to HS200 mode if bus width set successfully */
+ return err;
+}
+
+/*
+ * Switch to the high-speed mode
+ */
+static int mmc_select_hs(struct mmc_card *card)
+{
+ int err;
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
if (!err)
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+ return err;
+}
+
+/*
+ * Activate wide bus and DDR if supported.
+ */
+static int mmc_select_hs_ddr(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 bus_width, ext_csd_bits;
+ int err = 0;
+
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
+ return 0;
+
+ bus_width = host->ios.bus_width;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_warn("%s: switch to bus width %d ddr failed\n",
+ mmc_hostname(host), 1 << bus_width);
+ return err;
+ }
+
+ /*
+ * eMMC cards can support 3.3V to 1.2V i/o (vccq)
+ * signaling.
+ *
+ * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
+ *
+ * 1.8V vccq at 3.3V core voltage (vcc) is not required
+ * in the JEDEC spec for DDR.
+ *
+ * Do not force change in vccq since we are obviously
+ * working and no change to vccq is needed.
+ *
+ * WARNING: eMMC rules are NOT the same as SD DDR
+ */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_120);
+ if (err)
+ return err;
+ }
+
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ return err;
+}
+
+static int mmc_select_hs400(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ /*
+ * HS400 mode requires 8-bit bus width
+ */
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8))
+ return 0;
+
+ /*
+ * Before switching to dual data rate operation for HS400,
+ * it is required to convert from HS200 mode to HS mode.
+ */
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (err) {
+ pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ EXT_CSD_DDR_BUS_WIDTH_8,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_warn("%s: switch to bus width for hs400 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (err) {
+ pr_warn("%s: switch to hs400 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+ mmc_set_bus_speed(card);
+
+ return 0;
+}
+
+/*
+ * For devices supporting HS200 mode, the following sequence
+ * should be done before executing the tuning process:
+ * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
+ * 2. switch to HS200 mode
+ * 3. set the clock to > 52MHz and <= 200MHz
+ */
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = -EINVAL;
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* If fails try again during next card power cycle */
+ if (err)
+ goto err;
+
+ /*
+ * Set the bus width (4 or 8) with the host's support and
+ * switch to HS200 mode if the bus width is set successfully.
+ */
+ err = mmc_select_bus_width(card);
+ if (!IS_ERR_VALUE(err)) {
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 2,
- card->ext_csd.generic_cmd6_time,
- true, true, true);
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (!err)
+ mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ }
err:
return err;
}
/*
+ * Activate High Speed or HS200 mode if supported.
+ */
+static int mmc_select_timing(struct mmc_card *card)
+{
+ int err = 0;
+
+ if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 &&
+ card->ext_csd.hs_max_dtr == 0))
+ goto bus_speed;
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ err = mmc_select_hs200(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+ err = mmc_select_hs(card);
+
+ if (err && err != -EBADMSG)
+ return err;
+
+ if (err) {
+ pr_warn("%s: switch to %s failed\n",
+ mmc_card_hs(card) ? "high-speed" :
+ (mmc_card_hs200(card) ? "hs200" : ""),
+ mmc_hostname(card->host));
+ err = 0;
+ }
+
+bus_speed:
+ /*
+ * Set the bus speed to the selected bus timing.
+ * If no timing was selected, the backwards-compatible legacy timing is used.
+ */
+ mmc_set_bus_speed(card);
+ return err;
+}
+
+/*
+ * Execute the tuning sequence to find the proper bus operating
+ * conditions for HS200 and HS400; this sends CMD21 to the device.
+ */
+static int mmc_hs200_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ /*
+ * Timing should be adjusted to the HS400 target
+ * operation frequency for the tuning process.
+ */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8)
+ if (host->ops->prepare_hs400_tuning)
+ host->ops->prepare_hs400_tuning(host, &host->ios);
+
+ if (host->ops->execute_tuning) {
+ mmc_host_clk_hold(host);
+ err = host->ops->execute_tuning(host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ mmc_host_clk_release(host);
+
+ if (err)
+ pr_warn("%s: tuning execution failed\n",
+ mmc_hostname(host));
+ }
+
+ return err;
+}
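
Hosts participate in the HS400 pre-tuning step through the new ->prepare_hs400_tuning callback. A minimal sketch with hypothetical foo_* names, using the signatures as called above:

	static int foo_execute_tuning(struct mmc_host *host, u32 opcode);

	static void foo_prepare_hs400_tuning(struct mmc_host *host,
					     struct mmc_ios *ios)
	{
		/* e.g. reprogram the sampling clock for the HS400 target rate */
	}

	static const struct mmc_host_ops foo_ops = {
		.prepare_hs400_tuning	= foo_prepare_hs400_tuning,
		.execute_tuning		= foo_execute_tuning,
		/* remaining ops elided */
	};
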
+
+/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
@@ -874,9 +1177,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard)
{
struct mmc_card *card;
- int err, ddr = 0;
+ int err;
u32 cid[4];
- unsigned int max_dtr;
u32 rocr;
u8 *ext_csd = NULL;
@@ -1068,206 +1370,34 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
- * Activate high speed (if supported)
- */
- if (card->ext_csd.hs_max_dtr != 0) {
- err = 0;
- if (card->ext_csd.hs_max_dtr > 52000000 &&
- host->caps2 & MMC_CAP2_HS200)
- err = mmc_select_hs200(card);
- else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
- err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1,
- card->ext_csd.generic_cmd6_time,
- true, true, true);
-
- if (err && err != -EBADMSG)
- goto free_card;
-
- if (err) {
- pr_warning("%s: switch to highspeed failed\n",
- mmc_hostname(card->host));
- err = 0;
- } else {
- if (card->ext_csd.hs_max_dtr > 52000000 &&
- host->caps2 & MMC_CAP2_HS200) {
- mmc_card_set_hs200(card);
- mmc_set_timing(card->host,
- MMC_TIMING_MMC_HS200);
- } else {
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- }
- }
- }
-
- /*
- * Compute bus speed.
- */
- max_dtr = (unsigned int)-1;
-
- if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
- if (max_dtr > card->ext_csd.hs_max_dtr)
- max_dtr = card->ext_csd.hs_max_dtr;
- if (mmc_card_highspeed(card) && (max_dtr > 52000000))
- max_dtr = 52000000;
- } else if (max_dtr > card->csd.max_dtr) {
- max_dtr = card->csd.max_dtr;
- }
-
- mmc_set_clock(host, max_dtr);
-
- /*
- * Indicate DDR mode (if supported).
+ * Select timing interface
*/
- if (mmc_card_highspeed(card)) {
- if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
- && (host->caps & MMC_CAP_1_8V_DDR))
- ddr = MMC_1_8V_DDR_MODE;
- else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
- && (host->caps & MMC_CAP_1_2V_DDR))
- ddr = MMC_1_2V_DDR_MODE;
- }
+ err = mmc_select_timing(card);
+ if (err)
+ goto free_card;
- /*
- * Indicate HS200 SDR mode (if supported).
- */
if (mmc_card_hs200(card)) {
- u32 ext_csd_bits;
- u32 bus_width = card->host->ios.bus_width;
-
- /*
- * For devices supporting HS200 mode, the bus width has
- * to be set before executing the tuning function. If
- * set before tuning, then device will respond with CRC
- * errors for responses on CMD line. So for HS200 the
- * sequence will be
- * 1. set bus width 4bit / 8 bit (1 bit not supported)
- * 2. switch to HS200 mode
- * 3. set the clock to > 52Mhz <=200MHz and
- * 4. execute tuning for HS200
- */
- if ((host->caps2 & MMC_CAP2_HS200) &&
- card->host->ops->execute_tuning) {
- mmc_host_clk_hold(card->host);
- err = card->host->ops->execute_tuning(card->host,
- MMC_SEND_TUNING_BLOCK_HS200);
- mmc_host_clk_release(card->host);
- }
- if (err) {
- pr_warning("%s: tuning execution failed\n",
- mmc_hostname(card->host));
+ err = mmc_hs200_tuning(card);
+ if (err)
goto err;
- }
- ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
- EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
- err = mmc_select_powerclass(card, ext_csd_bits);
+ err = mmc_select_hs400(card);
if (err)
- pr_warning("%s: power class selection to bus width %d"
- " failed\n", mmc_hostname(card->host),
- 1 << bus_width);
+ goto err;
+ } else if (mmc_card_hs(card)) {
+ /* Select the desired bus width optionally */
+ err = mmc_select_bus_width(card);
+ if (!IS_ERR_VALUE(err)) {
+ err = mmc_select_hs_ddr(card);
+ if (err)
+ goto err;
+ }
}
/*
- * Activate wide bus and DDR (if supported).
+ * Choose the power class with the selected bus interface.
*/
- if (!mmc_card_hs200(card) &&
- (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
- (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
- static unsigned ext_csd_bits[][2] = {
- { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
- { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
- { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
- };
- static unsigned bus_widths[] = {
- MMC_BUS_WIDTH_8,
- MMC_BUS_WIDTH_4,
- MMC_BUS_WIDTH_1
- };
- unsigned idx, bus_width = 0;
-
- if (host->caps & MMC_CAP_8_BIT_DATA)
- idx = 0;
- else
- idx = 1;
- for (; idx < ARRAY_SIZE(bus_widths); idx++) {
- bus_width = bus_widths[idx];
- if (bus_width == MMC_BUS_WIDTH_1)
- ddr = 0; /* no DDR for 1-bit width */
- err = mmc_select_powerclass(card, ext_csd_bits[idx][0]);
- if (err)
- pr_warning("%s: power class selection to "
- "bus width %d failed\n",
- mmc_hostname(card->host),
- 1 << bus_width);
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH,
- ext_csd_bits[idx][0],
- card->ext_csd.generic_cmd6_time);
- if (!err) {
- mmc_set_bus_width(card->host, bus_width);
-
- /*
- * If controller can't handle bus width test,
- * compare ext_csd previously read in 1 bit mode
- * against ext_csd at new bus width
- */
- if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
- err = mmc_compare_ext_csds(card,
- bus_width);
- else
- err = mmc_bus_test(card, bus_width);
- if (!err)
- break;
- }
- }
-
- if (!err && ddr) {
- err = mmc_select_powerclass(card, ext_csd_bits[idx][1]);
- if (err)
- pr_warning("%s: power class selection to "
- "bus width %d ddr %d failed\n",
- mmc_hostname(card->host),
- 1 << bus_width, ddr);
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH,
- ext_csd_bits[idx][1],
- card->ext_csd.generic_cmd6_time);
- }
- if (err) {
- pr_warning("%s: switch to bus width %d ddr %d "
- "failed\n", mmc_hostname(card->host),
- 1 << bus_width, ddr);
- goto free_card;
- } else if (ddr) {
- /*
- * eMMC cards can support 3.3V to 1.2V i/o (vccq)
- * signaling.
- *
- * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
- *
- * 1.8V vccq at 3.3V core voltage (vcc) is not required
- * in the JEDEC spec for DDR.
- *
- * Do not force change in vccq since we are obviously
- * working and no change to vccq is needed.
- *
- * WARNING: eMMC rules are NOT the same as SD DDR
- */
- if (ddr == MMC_1_2V_DDR_MODE) {
- err = __mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_120);
- if (err)
- goto err;
- }
- mmc_card_set_ddr_mode(card);
- mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
- mmc_set_bus_width(card->host, bus_width);
- }
- }
+ mmc_select_powerclass(card);
/*
* Enable HPI feature (if supported)
@@ -1507,7 +1637,6 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
err = mmc_sleep(host);
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
- host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
if (!err) {
mmc_power_off(host);
@@ -1637,7 +1766,6 @@ static int mmc_power_restore(struct mmc_host *host)
{
int ret;
- host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_claim_host(host);
ret = mmc_init_card(host, host->card->ocr, host->card);
mmc_release_host(host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 2dd359d2242f..0c44510bf717 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -707,18 +707,10 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_serial.attr,
NULL,
};
-
-static struct attribute_group sd_std_attr_group = {
- .attrs = sd_std_attrs,
-};
-
-static const struct attribute_group *sd_attr_groups[] = {
- &sd_std_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(sd_std);
struct device_type sd_type = {
- .groups = sd_attr_groups,
+ .groups = sd_std_groups,
};
/*
@@ -895,7 +887,7 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
{
unsigned max_dtr = (unsigned int)-1;
- if (mmc_card_highspeed(card)) {
+ if (mmc_card_hs(card)) {
if (max_dtr > card->sw_caps.hs_max_dtr)
max_dtr = card->sw_caps.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
@@ -905,12 +897,6 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
return max_dtr;
}
-void mmc_sd_go_highspeed(struct mmc_card *card)
-{
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_SD_HS);
-}
-
/*
* Handle the detection and initialisation of a card.
*
@@ -985,16 +971,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
err = mmc_sd_init_uhs_card(card);
if (err)
goto free_card;
-
- /* Card is an ultra-high-speed card */
- mmc_card_set_uhs(card);
} else {
/*
* Attempt to change to high-speed (if supported)
*/
err = mmc_sd_switch_hs(card);
if (err > 0)
- mmc_sd_go_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
else if (err)
goto free_card;
@@ -1089,7 +1072,7 @@ static int _mmc_sd_suspend(struct mmc_host *host)
if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
- host->card->state &= ~MMC_STATE_HIGHSPEED;
+
if (!err) {
mmc_power_off(host);
mmc_card_set_suspended(host->card);
@@ -1198,7 +1181,6 @@ static int mmc_sd_power_restore(struct mmc_host *host)
{
int ret;
- host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
ret = mmc_sd_init_card(host, host->card->ocr, host->card);
mmc_release_host(host);
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 4b34b24f3f76..aab824a9a7f3 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -12,6 +12,5 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit);
unsigned mmc_sd_get_max_clock(struct mmc_card *card);
int mmc_sd_switch_hs(struct mmc_card *card);
-void mmc_sd_go_highspeed(struct mmc_card *card);
#endif
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d721c6e2af0..e636d9e99e4a 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -363,7 +363,7 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
{
unsigned max_dtr;
- if (mmc_card_highspeed(card)) {
+ if (mmc_card_hs(card)) {
/*
* The SDIO specification doesn't mention how
* the CIS transfer speed register relates to
@@ -733,7 +733,6 @@ try_again:
mmc_set_clock(host, card->cis.max_dtr);
if (card->cccr.high_speed) {
- mmc_card_set_highspeed(card);
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
}
@@ -792,16 +791,13 @@ try_again:
err = mmc_sdio_init_uhs_card(card);
if (err)
goto remove;
-
- /* Card is an ultra-high-speed card */
- mmc_card_set_uhs(card);
} else {
/*
* Switch to high-speed (if supported).
*/
err = sdio_enable_hs(card);
if (err > 0)
- mmc_sd_go_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
else if (err)
goto remove;
@@ -943,40 +939,21 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
- int i, err = 0;
-
- for (i = 0; i < host->card->sdio_funcs; i++) {
- struct sdio_func *func = host->card->sdio_func[i];
- if (func && sdio_func_present(func) && func->dev.driver) {
- const struct dev_pm_ops *pmops = func->dev.driver->pm;
- err = pmops->suspend(&func->dev);
- if (err)
- break;
- }
- }
- while (err && --i >= 0) {
- struct sdio_func *func = host->card->sdio_func[i];
- if (func && sdio_func_present(func) && func->dev.driver) {
- const struct dev_pm_ops *pmops = func->dev.driver->pm;
- pmops->resume(&func->dev);
- }
- }
-
- if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+ if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
mmc_claim_host(host);
sdio_disable_wide(host->card);
mmc_release_host(host);
}
- if (!err && !mmc_card_keep_power(host))
+ if (!mmc_card_keep_power(host))
mmc_power_off(host);
- return err;
+ return 0;
}
static int mmc_sdio_resume(struct mmc_host *host)
{
- int i, err = 0;
+ int err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
@@ -1019,24 +996,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
wake_up_process(host->sdio_irq_thread);
mmc_release_host(host);
- /*
- * If the card looked to be the same as before suspending, then
- * we proceed to resume all card functions. If one of them returns
- * an error then we simply return that error to the core and the
- * card will be redetected as new. It is the responsibility of
- * the function driver to perform further tests with the extra
- * knowledge it has of the card to confirm the card is indeed the
- * same as before suspending (same MAC address for network cards,
- * etc.) and return an error otherwise.
- */
- for (i = 0; !err && i < host->card->sdio_funcs; i++) {
- struct sdio_func *func = host->card->sdio_func[i];
- if (func && sdio_func_present(func) && func->dev.driver) {
- const struct dev_pm_ops *pmops = func->dev.driver->pm;
- err = pmops->resume(&func->dev);
- }
- }
-
host->pm_flags &= ~MMC_PM_KEEP_POWER;
return err;
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 92d1ba8e8153..4fa8fef9147f 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,20 +197,8 @@ static int sdio_bus_remove(struct device *dev)
#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-static int pm_no_operation(struct device *dev)
-{
- /*
- * Prevent the PM core from calling SDIO device drivers' suspend
- * callback routines, which it is not supposed to do, by using this
- * empty function as the bus type suspend callaback for SDIO.
- */
- return 0;
-}
-#endif
-
static const struct dev_pm_ops sdio_bus_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index aaa90460ed23..5cc13c8d35bb 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -90,6 +90,15 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
return ret;
}
+void sdio_run_irqs(struct mmc_host *host)
+{
+ mmc_claim_host(host);
+ host->sdio_irq_pending = true;
+ process_sdio_pending_irqs(host);
+ mmc_release_host(host);
+}
+EXPORT_SYMBOL_GPL(sdio_run_irqs);
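
The new export is aimed at hosts that set MMC_CAP2_SDIO_IRQ_NOTHREAD (see the sdio_card_irq_get() hunk below): instead of relying on the ksdioirqd polling thread, the host dispatches pending SDIO function interrupts from its own interrupt path. Because the helper claims the host and may sleep, it belongs in threaded-IRQ or workqueue context. A sketch with a hypothetical handler name:

	static irqreturn_t foo_sdio_irq_thread(int irq, void *dev_id)
	{
		struct mmc_host *mmc = dev_id;

		sdio_run_irqs(mmc);	/* claim host, run pending SDIO handlers */
		return IRQ_HANDLED;
	}
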
+
static int sdio_irq_thread(void *_host)
{
struct mmc_host *host = _host;
@@ -189,14 +198,20 @@ static int sdio_card_irq_get(struct mmc_card *card)
WARN_ON(!host->claimed);
if (!host->sdio_irqs++) {
- atomic_set(&host->sdio_irq_thread_abort, 0);
- host->sdio_irq_thread =
- kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
- mmc_hostname(host));
- if (IS_ERR(host->sdio_irq_thread)) {
- int err = PTR_ERR(host->sdio_irq_thread);
- host->sdio_irqs--;
- return err;
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 0);
+ host->sdio_irq_thread =
+ kthread_run(sdio_irq_thread, host,
+ "ksdioirqd/%s", mmc_hostname(host));
+ if (IS_ERR(host->sdio_irq_thread)) {
+ int err = PTR_ERR(host->sdio_irq_thread);
+ host->sdio_irqs--;
+ return err;
+ }
+ } else {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
}
}
@@ -211,8 +226,14 @@ static int sdio_card_irq_put(struct mmc_card *card)
BUG_ON(host->sdio_irqs < 1);
if (!--host->sdio_irqs) {
- atomic_set(&host->sdio_irq_thread_abort, 1);
- kthread_stop(host->sdio_irq_thread);
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 1);
+ kthread_stop(host->sdio_irq_thread);
+ } else {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
}
return 0;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index f7650b899e3d..5f89cb83d5f0 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -32,9 +32,7 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
/* Schedule a card detection after a debounce timeout */
struct mmc_host *host = dev_id;
- if (host->ops->card_event)
- host->ops->card_event(host);
-
+ host->trigger_card_event = true;
mmc_detect_change(host, msecs_to_jiffies(200));
return IRQ_HANDLED;
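
The flag set here is consumed by mmc_rescan() (see the core.c hunk earlier in this patch):

	if (host->trigger_card_event && host->ops->card_event) {
		host->ops->card_event(host);
		host->trigger_card_event = false;
	}

so the host's ->card_event callback now runs from the detect workqueue rather than from the card-detect interrupt handler.
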
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 779368b683d0..a5652548230a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -168,7 +168,7 @@ config MMC_SDHCI_ESDHC_IMX
config MMC_SDHCI_DOVE
tristate "SDHCI support on Marvell's Dove SoC"
- depends on ARCH_DOVE
+ depends on ARCH_DOVE || MACH_DOVE
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
@@ -216,8 +216,7 @@ config MMC_SDHCI_SIRF
config MMC_SDHCI_PXAV3
tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
depends on CLKDEV_LOOKUP
- select MMC_SDHCI
- select MMC_SDHCI_PLTFM
+ depends on MMC_SDHCI_PLTFM
default CPU_MMP2
help
This selects the Marvell(R) PXAV3 SD Host Controller.
@@ -229,8 +228,7 @@ config MMC_SDHCI_PXAV3
config MMC_SDHCI_PXAV2
tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
depends on CLKDEV_LOOKUP
- select MMC_SDHCI
- select MMC_SDHCI_PLTFM
+ depends on MMC_SDHCI_PLTFM
default CPU_PXA910
help
This selects the Marvell(R) PXAV2 SD Host Controller.
@@ -264,7 +262,7 @@ config MMC_SDHCI_S3C_DMA
config MMC_SDHCI_BCM_KONA
tristate "SDHCI support on Broadcom KONA platform"
depends on ARCH_BCM_MOBILE
- select MMC_SDHCI_PLTFM
+ depends on MMC_SDHCI_PLTFM
help
This selects the Broadcom Kona Secure Digital Host Controller
Interface(SDHCI) support.
@@ -283,10 +281,19 @@ config MMC_SDHCI_BCM2835
If unsure, say N.
+config MMC_MOXART
+ tristate "MOXART SD/MMC Host Controller support"
+ depends on ARCH_MOXART && MMC
+ help
+ This selects support for the MOXART SD/MMC Host Controller.
+	  MOXA provides a multi-functional card reader which can
+	  be found on some embedded hardware such as the UC-7112-LX.
+ If you have a controller with this interface, say Y here.
+
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
- select TPS65010 if MACH_OMAP_H2
+ depends on TPS65010 || !MACH_OMAP_H2
help
This selects the TI OMAP Multimedia card Interface.
If you have an OMAP board with a Multimedia Card slot,
@@ -688,6 +695,12 @@ config MMC_WMT
To compile this driver as a module, choose M here: the
module will be called wmt-sdmmc.
+config MMC_USDHI6ROL0
+ tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support"
+ help
+ This selects support for the Renesas USDHI6ROL0 SD/SDIO
+	  Host Controller.
+
config MMC_REALTEK_PCI
tristate "Realtek PCI-E SD/MMC Card Interface Driver"
depends on MFD_RTSX_PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 61cbc241935b..7f81ddf1dd2c 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -50,7 +50,9 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
+obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
+obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 42706ea0ba85..bb585d940901 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -37,6 +37,7 @@
#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>
+#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/unaligned.h>
@@ -820,16 +821,9 @@ static void atmci_pdc_complete(struct atmel_mci *host)
atmci_pdc_cleanup(host);
- /*
- * If the card was removed, data will be NULL. No point trying
- * to send the stop command or waiting for NBUSY in this case.
- */
- if (host->data) {
- dev_dbg(&host->pdev->dev,
- "(%s) set pending xfer complete\n", __func__);
- atmci_set_pending(host, EVENT_XFER_COMPLETE);
- tasklet_schedule(&host->tasklet);
- }
+ dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ tasklet_schedule(&host->tasklet);
}
static void atmci_dma_cleanup(struct atmel_mci *host)
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 3423c5ed50c7..0fbc53ac7eae 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -187,7 +187,7 @@ static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
unsigned long actual;
u8 div = priv->ciu_div + 1;
- if (ios->timing == MMC_TIMING_UHS_DDR50) {
+ if (ios->timing == MMC_TIMING_MMC_DDR52) {
mci_writel(host, CLKSEL, priv->ddr_timing);
/* Should be double rate for DDR mode */
if (ios->bus_width == MMC_BUS_WIDTH_8)
@@ -386,8 +386,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
- MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
- MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
+ MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
MMC_CAP_CMD23,
MMC_CAP_CMD23,
MMC_CAP_CMD23,
@@ -426,7 +425,7 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-const struct dev_pm_ops dw_mci_exynos_pmops = {
+static const struct dev_pm_ops dw_mci_exynos_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
.resume_noirq = dw_mci_exynos_resume_noirq,
.thaw_noirq = dw_mci_exynos_resume_noirq,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index cced599d5aeb..1ac227c603b7 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -235,12 +235,6 @@ err:
}
#endif /* defined(CONFIG_DEBUG_FS) */
-static void dw_mci_set_timeout(struct dw_mci *host)
-{
- /* timeout (maximum) */
- mci_writel(host, TMOUT, 0xffffffff);
-}
-
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
@@ -257,9 +251,8 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
(cmd->opcode == SD_IO_RW_DIRECT &&
((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
cmdr |= SDMMC_CMD_STOP;
- else
- if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
- cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+ else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
@@ -850,8 +843,6 @@ static void __dw_mci_start_request(struct dw_mci *host,
u32 cmdflags;
mrq = slot->mrq;
- if (host->pdata->select_slot)
- host->pdata->select_slot(slot->id);
host->cur_slot = slot;
host->mrq = mrq;
@@ -864,7 +855,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
data = cmd->data;
if (data) {
- dw_mci_set_timeout(host);
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
@@ -962,7 +953,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
regs = mci_readl(slot->host, UHS_REG);
/* DDR mode set */
- if (ios->timing == MMC_TIMING_UHS_DDR50)
+ if (ios->timing == MMC_TIMING_MMC_DDR52)
regs |= ((0x1 << slot->id) << 16);
else
regs &= ~((0x1 << slot->id) << 16);
@@ -985,17 +976,11 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
- /* Power up slot */
- if (slot->host->pdata->setpower)
- slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
regs = mci_readl(slot->host, PWREN);
regs |= (1 << slot->id);
mci_writel(slot->host, PWREN, regs);
break;
case MMC_POWER_OFF:
- /* Power down slot */
- if (slot->host->pdata->setpower)
- slot->host->pdata->setpower(slot->id, 0);
regs = mci_readl(slot->host, PWREN);
regs &= ~(1 << slot->id);
mci_writel(slot->host, PWREN, regs);
@@ -1009,15 +994,13 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
{
int read_only;
struct dw_mci_slot *slot = mmc_priv(mmc);
- struct dw_mci_board *brd = slot->host->pdata;
+ int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
read_only = 0;
- else if (brd->get_ro)
- read_only = brd->get_ro(slot->id);
- else if (gpio_is_valid(slot->wp_gpio))
- read_only = gpio_get_value(slot->wp_gpio);
+ else if (!IS_ERR_VALUE(gpio_ro))
+ read_only = gpio_ro;
else
read_only =
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
@@ -1039,8 +1022,6 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
/* Use platform get_cd function, else try onboard card detect */
if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
present = 1;
- else if (brd->get_cd)
- present = !brd->get_cd(slot->id);
else if (!IS_ERR_VALUE(gpio_cd))
present = gpio_cd;
else
@@ -1248,7 +1229,7 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
data->error = -EIO;
}
- dev_err(host->dev, "data error, status 0x%08x\n", status);
+ dev_dbg(host->dev, "data error, status 0x%08x\n", status);
/*
* After an error, there may be data lingering
@@ -2045,86 +2026,15 @@ static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
return quirks;
}
-
-/* find out bus-width for a given slot */
-static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
-{
- struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
- u32 bus_wd = 1;
-
- if (!np)
- return 1;
-
- if (of_property_read_u32(np, "bus-width", &bus_wd))
- dev_err(dev, "bus-width property not found, assuming width"
- " as 1\n");
- return bus_wd;
-}
-
-/* find the write protect gpio for a given slot; or -1 if none specified */
-static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
-{
- struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
- int gpio;
-
- if (!np)
- return -EINVAL;
-
- gpio = of_get_named_gpio(np, "wp-gpios", 0);
-
- /* Having a missing entry is valid; return silently */
- if (!gpio_is_valid(gpio))
- return -EINVAL;
-
- if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
- dev_warn(dev, "gpio [%d] request failed\n", gpio);
- return -EINVAL;
- }
-
- return gpio;
-}
-
-/* find the cd gpio for a given slot */
-static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
- struct mmc_host *mmc)
-{
- struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
- int gpio;
-
- if (!np)
- return;
-
- gpio = of_get_named_gpio(np, "cd-gpios", 0);
-
- /* Having a missing entry is valid; return silently */
- if (!gpio_is_valid(gpio))
- return;
-
- if (mmc_gpio_request_cd(mmc, gpio, 0))
- dev_warn(dev, "gpio [%d] request failed\n", gpio);
-}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
return 0;
}
-static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
-{
- return 1;
-}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
return NULL;
}
-static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
-{
- return -EINVAL;
-}
-static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
- struct mmc_host *mmc)
-{
- return;
-}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
@@ -2134,7 +2044,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
const struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
u32 freq[2];
- u8 bus_width;
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
if (!mmc)
@@ -2158,17 +2067,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->f_max = freq[1];
}
- if (host->pdata->get_ocr)
- mmc->ocr_avail = host->pdata->get_ocr(id);
- else
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-
- /*
- * Start with slot power disabled, it will be enabled when a card
- * is detected.
- */
- if (host->pdata->setpower)
- host->pdata->setpower(id, 0);
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
@@ -2189,19 +2088,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
- if (host->pdata->get_bus_wd)
- bus_width = host->pdata->get_bus_wd(slot->id);
- else if (host->dev->of_node)
- bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
- else
- bus_width = 1;
-
- switch (bus_width) {
- case 8:
- mmc->caps |= MMC_CAP_8_BIT_DATA;
- case 4:
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- }
+ mmc_of_parse(mmc);
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
@@ -2226,8 +2113,10 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
#endif /* CONFIG_MMC_DW_IDMAC */
}
- slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
- dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
+ if (dw_mci_get_cd(mmc))
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ else
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
ret = mmc_add_host(mmc);
if (ret)
@@ -2249,10 +2138,6 @@ err_setup_bus:
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
- /* Shutdown detect IRQ */
- if (slot->host->pdata->exit)
- slot->host->pdata->exit(id);
-
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host(slot->mmc);
slot->host->slot[id] = NULL;
@@ -2399,24 +2284,9 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(ret);
}
- if (of_find_property(np, "keep-power-in-suspend", NULL))
- pdata->pm_caps |= MMC_PM_KEEP_POWER;
-
- if (of_find_property(np, "enable-sdio-wakeup", NULL))
- pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
-
if (of_find_property(np, "supports-highspeed", NULL))
pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
- if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
- pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
-
- if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
- pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
-
- if (of_get_property(np, "cd-inverted", NULL))
- pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
-
return pdata;
}
@@ -2442,9 +2312,9 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
+ if (host->pdata->num_slots > 1) {
dev_err(host->dev,
- "Platform data must supply select_slot function\n");
+ "Platform data must supply num_slots.\n");
return -ENODEV;
}
@@ -2474,12 +2344,19 @@ int dw_mci_probe(struct dw_mci *host)
ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
if (ret)
dev_warn(host->dev,
- "Unable to set bus rate to %ul\n",
+ "Unable to set bus rate to %uHz\n",
host->pdata->bus_hz);
}
host->bus_hz = clk_get_rate(host->ciu_clk);
}
+ if (!host->bus_hz) {
+ dev_err(host->dev,
+ "Platform data must supply bus speed\n");
+ ret = -ENODEV;
+ goto err_clk_ciu;
+ }
+
if (drv_data && drv_data->init) {
ret = drv_data->init(host);
if (ret) {
@@ -2516,13 +2393,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (!host->bus_hz) {
- dev_err(host->dev,
- "Platform data must supply bus speed\n");
- ret = -ENODEV;
- goto err_regulator;
- }
-
host->quirks = host->pdata->quirks;
spin_lock_init(&host->lock);
@@ -2666,8 +2536,6 @@ err_workqueue:
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
-
-err_regulator:
if (host->vmmc)
regulator_disable(host->vmmc);
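
The hand-rolled slot parsing deleted above is subsumed by the mmc_of_parse() call added to dw_mci_init_slot(). A rough, non-exhaustive summary of what that core helper covers, which is why dw_mci_of_get_bus_wd/wp_gpio/cd_gpio and the pm_caps/caps2 parsing could go:

	/*
	 * mmc_of_parse() reads, among others:
	 *   "bus-width"                - 4-/8-bit data capabilities
	 *   "cd-gpios", "cd-inverted"  - card-detect GPIO setup
	 *   "wp-gpios"                 - write-protect GPIO setup
	 *   "keep-power-in-suspend"    - MMC_PM_KEEP_POWER
	 *   "enable-sdio-wakeup"       - MMC_PM_WAKE_SDIO_IRQ
	 */
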
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 68349779c396..738fa241d058 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -195,7 +195,6 @@ extern int dw_mci_resume(struct dw_mci *host);
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
- * @wp_gpio: If gpio_is_valid() we'll use this to read write protect.
* @ctype: Card type for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
@@ -214,7 +213,6 @@ struct dw_mci_slot {
struct dw_mci *host;
int quirks;
- int wp_gpio;
u32 ctype;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index de2139cf3444..537d6c7a5ae4 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -515,10 +515,13 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
jz4740_mmc_send_command(host, req->stop);
- timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_PRG_DONE);
- if (timeout) {
- host->state = JZ4740_MMC_STATE_DONE;
- break;
+ if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
+ timeout = jz4740_mmc_poll_irq(host,
+ JZ_MMC_IRQ_PRG_DONE);
+ if (timeout) {
+ host->state = JZ4740_MMC_STATE_DONE;
+ break;
+ }
}
case JZ4740_MMC_STATE_DONE:
break;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 0a87e5691341..cc8d4a6099cd 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -448,7 +448,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
{
struct scratch *data = host->data;
u8 *cp = data->status;
- u32 arg = cmd->arg;
int status;
struct spi_transfer *t;
@@ -465,14 +464,12 @@ mmc_spi_command_send(struct mmc_spi_host *host,
* We init the whole buffer to all-ones, which is what we need
* to write while we're reading (later) response data.
*/
- memset(cp++, 0xff, sizeof(data->status));
+ memset(cp, 0xff, sizeof(data->status));
- *cp++ = 0x40 | cmd->opcode;
- *cp++ = (u8)(arg >> 24);
- *cp++ = (u8)(arg >> 16);
- *cp++ = (u8)(arg >> 8);
- *cp++ = (u8)arg;
- *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
+ cp[1] = 0x40 | cmd->opcode;
+ put_unaligned_be32(cmd->arg, cp+2);
+ cp[6] = crc7_be(0, cp+1, 5) | 0x01;
+ cp += 7;
/* Then, read up to 13 bytes (while writing all-ones):
* - N(CR) (== 1..8) bytes of all-ones
@@ -711,10 +708,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
* so we have to cope with this situation and check the response
* bit-by-bit. Arggh!!!
*/
- pattern = scratch->status[0] << 24;
- pattern |= scratch->status[1] << 16;
- pattern |= scratch->status[2] << 8;
- pattern |= scratch->status[3];
+ pattern = get_unaligned_be32(scratch->status);
/* First 3 bit of pattern are undefined */
pattern |= 0xE0000000;
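
get_unaligned_be32()/put_unaligned_be32() are drop-in replacements for the shift-and-or sequences removed above, and are safe on architectures that fault on unaligned loads. A tiny equivalence sketch (assuming asm/unaligned.h is included):

	u8 buf[4] = { 0x12, 0x34, 0x56, 0x78 };
	u32 v = get_unaligned_be32(buf);	/* 0x12345678, alignment-safe */

	put_unaligned_be32(v, buf);		/* the inverse store */
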
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index a084edd37af5..7ad463e9741c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -301,7 +301,8 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_ST_8BIT_BUS;
- if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
clk |= MCI_ST_UX500_NEG_EDGE;
mmci_write_clkreg(host, clk);
@@ -764,7 +765,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
mmci_write_clkreg(host, clk);
}
- if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
datactrl |= MCI_ST_DPSM_DDRMODE;
/*
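
Both mmci hunks above gate the same pair of timings: on the ST variants, eMMC DDR52 needs the identical negative-edge clocking and DDR data-path setup as SD UHS DDR50. The check can be read as one predicate; a sketch of the consolidated test, assuming only <linux/mmc/host.h> (not part of the patch):

static inline bool mmci_ios_is_ddr(const struct mmc_host *mmc)
{
	return mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	       mmc->ios.timing == MMC_TIMING_MMC_DDR52;
}
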
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
new file mode 100644
index 000000000000..74924a04026e
--- /dev/null
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -0,0 +1,730 @@
+/*
+ * MOXA ART MMC host driver.
+ *
+ * Copyright (C) 2014 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technologies Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sd.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/bitops.h>
+#include <linux/of_dma.h>
+#include <linux/spinlock.h>
+
+#define REG_COMMAND 0
+#define REG_ARGUMENT 4
+#define REG_RESPONSE0 8
+#define REG_RESPONSE1 12
+#define REG_RESPONSE2 16
+#define REG_RESPONSE3 20
+#define REG_RESPONSE_COMMAND 24
+#define REG_DATA_CONTROL 28
+#define REG_DATA_TIMER 32
+#define REG_DATA_LENGTH 36
+#define REG_STATUS 40
+#define REG_CLEAR 44
+#define REG_INTERRUPT_MASK 48
+#define REG_POWER_CONTROL 52
+#define REG_CLOCK_CONTROL 56
+#define REG_BUS_WIDTH 60
+#define REG_DATA_WINDOW 64
+#define REG_FEATURE 68
+#define REG_REVISION 72
+
+/* REG_COMMAND */
+#define CMD_SDC_RESET BIT(10)
+#define CMD_EN BIT(9)
+#define CMD_APP_CMD BIT(8)
+#define CMD_LONG_RSP BIT(7)
+#define CMD_NEED_RSP BIT(6)
+#define CMD_IDX_MASK 0x3f
+
+/* REG_RESPONSE_COMMAND */
+#define RSP_CMD_APP BIT(6)
+#define RSP_CMD_IDX_MASK 0x3f
+
+/* REG_DATA_CONTROL */
+#define DCR_DATA_FIFO_RESET BIT(8)
+#define DCR_DATA_THRES BIT(7)
+#define DCR_DATA_EN BIT(6)
+#define DCR_DMA_EN BIT(5)
+#define DCR_DATA_WRITE BIT(4)
+#define DCR_BLK_SIZE 0x0f
+
+/* REG_DATA_LENGTH */
+#define DATA_LEN_MASK 0xffffff
+
+/* REG_STATUS */
+#define WRITE_PROT BIT(12)
+#define CARD_DETECT BIT(11)
+/* Bits 1-10 below can be written to either register, interrupt mask or clear. */
+#define CARD_CHANGE BIT(10)
+#define FIFO_ORUN BIT(9)
+#define FIFO_URUN BIT(8)
+#define DATA_END BIT(7)
+#define CMD_SENT BIT(6)
+#define DATA_CRC_OK BIT(5)
+#define RSP_CRC_OK BIT(4)
+#define DATA_TIMEOUT BIT(3)
+#define RSP_TIMEOUT BIT(2)
+#define DATA_CRC_FAIL BIT(1)
+#define RSP_CRC_FAIL BIT(0)
+
+#define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \
+ RSP_CRC_OK | CARD_DETECT | CMD_SENT)
+
+#define MASK_DATA (DATA_CRC_OK | DATA_END | \
+ DATA_CRC_FAIL | DATA_TIMEOUT)
+
+#define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE)
+
+/* REG_POWER_CONTROL */
+#define SD_POWER_ON BIT(4)
+#define SD_POWER_MASK 0x0f
+
+/* REG_CLOCK_CONTROL */
+#define CLK_HISPD BIT(9)
+#define CLK_OFF BIT(8)
+#define CLK_SD BIT(7)
+#define CLK_DIV_MASK 0x7f
+
+/* REG_BUS_WIDTH */
+#define BUS_WIDTH_8 BIT(2)
+#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_1 BIT(0)
+
+#define MMC_VDD_360 23
+#define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK)
+#define MAX_RETRIES 500000
+
+struct moxart_host {
+ spinlock_t lock;
+
+ void __iomem *base;
+
+ phys_addr_t reg_phys;
+
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct scatterlist *cur_sg;
+ struct completion dma_complete;
+ struct completion pio_complete;
+
+ u32 num_sg;
+ u32 data_remain;
+ u32 data_len;
+ u32 fifo_width;
+ u32 timeout;
+ u32 rate;
+
+ long sysclk;
+
+ bool have_dma;
+ bool is_removed;
+};
+
+static inline void moxart_init_sg(struct moxart_host *host,
+ struct mmc_data *data)
+{
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+ host->data_remain = host->cur_sg->length;
+
+ if (host->data_remain > host->data_len)
+ host->data_remain = host->data_len;
+}
+
+static inline int moxart_next_sg(struct moxart_host *host)
+{
+ int remain;
+ struct mmc_data *data = host->mrq->cmd->data;
+
+ host->cur_sg++;
+ host->num_sg--;
+
+ if (host->num_sg > 0) {
+ host->data_remain = host->cur_sg->length;
+ remain = host->data_len - data->bytes_xfered;
+ if (remain > 0 && remain < host->data_remain)
+ host->data_remain = remain;
+ }
+
+ return host->num_sg;
+}
+
+static int moxart_wait_for_status(struct moxart_host *host,
+ u32 mask, u32 *status)
+{
+ int ret = -ETIMEDOUT;
+ u32 i;
+
+ for (i = 0; i < MAX_RETRIES; i++) {
+ *status = readl(host->base + REG_STATUS);
+ if (!(*status & mask)) {
+ udelay(5);
+ continue;
+ }
+ writel(*status & mask, host->base + REG_CLEAR);
+ ret = 0;
+ break;
+ }
+
+ if (ret)
+ dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");
+
+ return ret;
+}
+
+static void moxart_send_command(struct moxart_host *host,
+ struct mmc_command *cmd)
+{
+ u32 status, cmdctrl;
+
+ writel(RSP_TIMEOUT | RSP_CRC_OK |
+ RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
+ writel(cmd->arg, host->base + REG_ARGUMENT);
+
+ cmdctrl = cmd->opcode & CMD_IDX_MASK;
+ if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
+ cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
+ cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
+ cmdctrl |= CMD_APP_CMD;
+
+ if (cmd->flags & MMC_RSP_PRESENT)
+ cmdctrl |= CMD_NEED_RSP;
+
+ if (cmd->flags & MMC_RSP_136)
+ cmdctrl |= CMD_LONG_RSP;
+
+ writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);
+
+ if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
+ cmd->error = -ETIMEDOUT;
+
+ if (status & RSP_TIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ return;
+ }
+ if (status & RSP_CRC_FAIL) {
+ cmd->error = -EIO;
+ return;
+ }
+ if (status & RSP_CRC_OK) {
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[3] = readl(host->base + REG_RESPONSE0);
+ cmd->resp[2] = readl(host->base + REG_RESPONSE1);
+ cmd->resp[1] = readl(host->base + REG_RESPONSE2);
+ cmd->resp[0] = readl(host->base + REG_RESPONSE3);
+ } else {
+ cmd->resp[0] = readl(host->base + REG_RESPONSE0);
+ }
+ }
+}
+
+static void moxart_dma_complete(void *param)
+{
+ struct moxart_host *host = param;
+
+ complete(&host->dma_complete);
+}
+
+static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
+{
+ u32 len, dir_data, dir_slave;
+ unsigned long dma_time;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *dma_chan;
+
+ if (host->data_len == data->bytes_xfered)
+ return;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_chan = host->dma_chan_tx;
+ dir_data = DMA_TO_DEVICE;
+ dir_slave = DMA_MEM_TO_DEV;
+ } else {
+ dma_chan = host->dma_chan_rx;
+ dir_data = DMA_FROM_DEVICE;
+ dir_slave = DMA_DEV_TO_MEM;
+ }
+
+ len = dma_map_sg(dma_chan->device->dev, data->sg,
+ data->sg_len, dir_data);
+
+ if (len > 0) {
+ desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
+ len, dir_slave,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
+ }
+
+ if (desc) {
+ host->tx_desc = desc;
+ desc->callback = moxart_dma_complete;
+ desc->callback_param = host;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma_chan);
+ }
+
+ data->bytes_xfered += host->data_remain;
+
+ dma_time = wait_for_completion_interruptible_timeout(
+ &host->dma_complete, host->timeout);
+
+ dma_unmap_sg(dma_chan->device->dev,
+ data->sg, data->sg_len,
+ dir_data);
+}
+
+static void moxart_transfer_pio(struct moxart_host *host)
+{
+ struct mmc_data *data = host->mrq->cmd->data;
+ u32 *sgp, len = 0, remain, status;
+
+ if (host->data_len == data->bytes_xfered)
+ return;
+
+ sgp = sg_virt(host->cur_sg);
+ remain = host->data_remain;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ while (remain > 0) {
+ if (moxart_wait_for_status(host, FIFO_URUN, &status)
+ == -ETIMEDOUT) {
+ data->error = -ETIMEDOUT;
+ complete(&host->pio_complete);
+ return;
+ }
+ for (len = 0; len < remain && len < host->fifo_width;) {
+ iowrite32(*sgp, host->base + REG_DATA_WINDOW);
+ sgp++;
+ len += 4;
+ }
+ remain -= len;
+ }
+
+ } else {
+ while (remain > 0) {
+ if (moxart_wait_for_status(host, FIFO_ORUN, &status)
+ == -ETIMEDOUT) {
+ data->error = -ETIMEDOUT;
+ complete(&host->pio_complete);
+ return;
+ }
+ for (len = 0; len < remain && len < host->fifo_width;) {
+ /* SCR data must be read in big endian. */
+ if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
+ *sgp = ioread32be(host->base +
+ REG_DATA_WINDOW);
+ else
+ *sgp = ioread32(host->base +
+ REG_DATA_WINDOW);
+ sgp++;
+ len += 4;
+ }
+ remain -= len;
+ }
+ }
+
+ data->bytes_xfered += host->data_remain - remain;
+ host->data_remain = remain;
+
+ if (host->data_len != data->bytes_xfered)
+ moxart_next_sg(host);
+ else
+ complete(&host->pio_complete);
+}
+
+static void moxart_prepare_data(struct moxart_host *host)
+{
+ struct mmc_data *data = host->mrq->cmd->data;
+ u32 datactrl;
+ int blksz_bits;
+
+ if (!data)
+ return;
+
+ host->data_len = data->blocks * data->blksz;
+ blksz_bits = ffs(data->blksz) - 1;
+ BUG_ON(1 << blksz_bits != data->blksz);
+
+ moxart_init_sg(host, data);
+
+ datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
+
+ if (data->flags & MMC_DATA_WRITE)
+ datactrl |= DCR_DATA_WRITE;
+
+ if ((host->data_len > host->fifo_width) && host->have_dma)
+ datactrl |= DCR_DMA_EN;
+
+ writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
+ writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
+ writel(host->rate, host->base + REG_DATA_TIMER);
+ writel(host->data_len, host->base + REG_DATA_LENGTH);
+ writel(datactrl, host->base + REG_DATA_CONTROL);
+}
+
+static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct moxart_host *host = mmc_priv(mmc);
+ unsigned long pio_time, flags;
+ u32 status;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ init_completion(&host->dma_complete);
+ init_completion(&host->pio_complete);
+
+ host->mrq = mrq;
+
+ if (readl(host->base + REG_STATUS) & CARD_DETECT) {
+ mrq->cmd->error = -ETIMEDOUT;
+ goto request_done;
+ }
+
+ moxart_prepare_data(host);
+ moxart_send_command(host, host->mrq->cmd);
+
+ if (mrq->cmd->data) {
+ if ((host->data_len > host->fifo_width) && host->have_dma) {
+
+ writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ moxart_transfer_dma(mrq->cmd->data, host);
+
+ spin_lock_irqsave(&host->lock, flags);
+ } else {
+
+ writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* PIO transfers start from interrupt. */
+ pio_time = wait_for_completion_interruptible_timeout(
+ &host->pio_complete, host->timeout);
+
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
+ if (host->is_removed) {
+ dev_err(mmc_dev(host->mmc), "card removed\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ goto request_done;
+ }
+
+ if (moxart_wait_for_status(host, MASK_DATA, &status)
+ == -ETIMEDOUT) {
+ mrq->cmd->data->error = -ETIMEDOUT;
+ goto request_done;
+ }
+
+ if (status & DATA_CRC_FAIL)
+ mrq->cmd->data->error = -ETIMEDOUT;
+
+ if (mrq->cmd->data->stop)
+ moxart_send_command(host, mrq->cmd->data->stop);
+ }
+
+request_done:
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_request_done(host->mmc, mrq);
+}
+
+static irqreturn_t moxart_irq(int irq, void *devid)
+{
+ struct moxart_host *host = (struct moxart_host *)devid;
+ u32 status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ status = readl(host->base + REG_STATUS);
+ if (status & CARD_CHANGE) {
+ host->is_removed = status & CARD_DETECT;
+ if (host->is_removed && host->have_dma) {
+ dmaengine_terminate_all(host->dma_chan_tx);
+ dmaengine_terminate_all(host->dma_chan_rx);
+ }
+ host->mrq = NULL;
+ writel(MASK_INTR_PIO, host->base + REG_CLEAR);
+ writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
+ mmc_detect_change(host->mmc, 0);
+ }
+ if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
+ moxart_transfer_pio(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct moxart_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u8 power, div;
+ u32 ctrl;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (ios->clock) {
+ for (div = 0; div < CLK_DIV_MASK; ++div) {
+ if (ios->clock >= host->sysclk / (2 * (div + 1)))
+ break;
+ }
+ ctrl = CLK_SD | div;
+ host->rate = host->sysclk / (2 * (div + 1));
+ if (host->rate > host->sysclk)
+ ctrl |= CLK_HISPD;
+ writel(ctrl, host->base + REG_CLOCK_CONTROL);
+ }
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
+ host->base + REG_POWER_CONTROL);
+ } else {
+ if (ios->vdd < MIN_POWER)
+ power = 0;
+ else
+ power = ios->vdd - MIN_POWER;
+
+ writel(SD_POWER_ON | (u32) power,
+ host->base + REG_POWER_CONTROL);
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_4:
+ writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
+ break;
+ case MMC_BUS_WIDTH_8:
+ writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
+ break;
+ default:
+ writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
+ break;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int moxart_get_ro(struct mmc_host *mmc)
+{
+ struct moxart_host *host = mmc_priv(mmc);
+
+ return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
+}
+
+static struct mmc_host_ops moxart_ops = {
+ .request = moxart_request,
+ .set_ios = moxart_set_ios,
+ .get_ro = moxart_get_ro,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res_mmc;
+ struct mmc_host *mmc;
+ struct moxart_host *host = NULL;
+ struct dma_slave_config cfg;
+ struct clk *clk;
+ void __iomem *reg_mmc;
+ dma_cap_mask_t mask;
+ int irq, ret;
+ u32 i;
+
+ mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
+ if (!mmc) {
+ dev_err(dev, "mmc_alloc_host failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_address_to_resource(node, 0, &res_mmc);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource failed\n");
+ goto out;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "of_clk_get failed\n");
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+
+ reg_mmc = devm_ioremap_resource(dev, &res_mmc);
+ if (IS_ERR(reg_mmc)) {
+ ret = PTR_ERR(reg_mmc);
+ goto out;
+ }
+
+ mmc_of_parse(mmc);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->base = reg_mmc;
+ host->reg_phys = res_mmc.start;
+ host->timeout = msecs_to_jiffies(1000);
+ host->sysclk = clk_get_rate(clk);
+ host->fifo_width = readl(host->base + REG_FEATURE) << 2;
+ host->dma_chan_tx = of_dma_request_slave_channel(node, "tx");
+ host->dma_chan_rx = of_dma_request_slave_channel(node, "rx");
+
+ spin_lock_init(&host->lock);
+
+ mmc->ops = &moxart_ops;
+ mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
+ mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
+ mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. */
+
+ if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
+ dev_dbg(dev, "PIO mode transfer enabled\n");
+ host->have_dma = false;
+ } else {
+ dev_dbg(dev, "DMA channels found (%p,%p)\n",
+ host->dma_chan_tx, host->dma_chan_rx);
+ host->have_dma = true;
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.src_addr = 0;
+ cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
+ dmaengine_slave_config(host->dma_chan_tx, &cfg);
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
+ cfg.dst_addr = 0;
+ dmaengine_slave_config(host->dma_chan_rx, &cfg);
+ }
+
+ switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
+ case 1:
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 2:
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ break;
+ default:
+ break;
+ }
+
+ writel(0, host->base + REG_INTERRUPT_MASK);
+
+ writel(CMD_SDC_RESET, host->base + REG_COMMAND);
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
+ break;
+ udelay(5);
+ }
+
+ ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
+ if (ret)
+ goto out;
+
+ dev_set_drvdata(dev, mmc);
+ mmc_add_host(mmc);
+
+ dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
+
+ return 0;
+
+out:
+ if (mmc)
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+	struct moxart_host *host = mmc_priv(mmc);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (mmc) {
+		if (!IS_ERR(host->dma_chan_tx))
+			dma_release_channel(host->dma_chan_tx);
+		if (!IS_ERR(host->dma_chan_rx))
+			dma_release_channel(host->dma_chan_rx);
+		mmc_remove_host(mmc);
+
+		writel(0, host->base + REG_INTERRUPT_MASK);
+		writel(0, host->base + REG_POWER_CONTROL);
+		writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
+		       host->base + REG_CLOCK_CONTROL);
+
+		/* host is mmc's private area: quiesce the hardware before
+		 * mmc_free_host() frees both, and do not kfree(host). */
+		mmc_free_host(mmc);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id moxart_mmc_match[] = {
+ { .compatible = "moxa,moxart-mmc" },
+ { .compatible = "faraday,ftsdc010" },
+ { }
+};
+
+static struct platform_driver moxart_mmc_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "mmc-moxart",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_mmc_match,
+ },
+};
+module_platform_driver(moxart_mmc_driver);
+
+MODULE_ALIAS("platform:mmc-moxart");
+MODULE_DESCRIPTION("MOXA ART MMC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
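
The new driver's clock setup in moxart_set_ios() searches for the smallest divider whose output does not exceed the requested rate; the controller clock is sysclk / (2 * (div + 1)) with a 7-bit divider field (CLK_DIV_MASK == 0x7f). A stand-alone sketch of that search; the 100 MHz sysclk is an assumed example value:

#include <stdio.h>

int main(void)
{
	long sysclk = 100000000;	   /* assumed 100 MHz source clock */
	unsigned int requested = 25000000; /* 25 MHz target */
	unsigned int div;

	for (div = 0; div < 0x7f; ++div)
		if (requested >= sysclk / (2 * (div + 1)))
			break;

	printf("div=%u -> actual rate %ld Hz\n",
	       div, sysclk / (2 * (div + 1)));
	return 0;
}
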
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 45aa2206741d..6b4c5ad3b393 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -79,11 +79,11 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
unsigned long t = jiffies + HZ;
unsigned int hw_state, count = 0;
do {
+ hw_state = mvsd_read(MVSD_HW_STATE);
if (time_after(jiffies, t)) {
dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
break;
}
- hw_state = mvsd_read(MVSD_HW_STATE);
count++;
} while (!(hw_state & (1 << 13)));
dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
@@ -354,6 +354,20 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
intr_status, mvsd_read(MVSD_NOR_INTR_EN),
mvsd_read(MVSD_HW_STATE));
+ /*
+	 * It looks like the SDIO IP can issue one late, spurious irq
+	 * even though all irqs should be disabled. To work around this,
+	 * bail out early if we didn't expect any irqs to occur.
+ */
+ if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
+ dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
+ mvsd_read(MVSD_NOR_INTR_STATUS),
+ mvsd_read(MVSD_NOR_INTR_EN),
+ mvsd_read(MVSD_ERR_INTR_STATUS),
+ mvsd_read(MVSD_ERR_INTR_EN));
+ return IRQ_HANDLED;
+ }
+
spin_lock(&host->lock);
/* PIO handling, if needed. Messy business... */
@@ -801,10 +815,10 @@ static int mvsd_probe(struct platform_device *pdev)
goto out;
if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
- dev_notice(&pdev->dev, "using GPIO for card detection\n");
+ dev_dbg(&pdev->dev, "using GPIO for card detection\n");
else
- dev_notice(&pdev->dev,
- "lacking card detect (fall back to polling)\n");
+ dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");
+
return 0;
out:
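
The first mvsdio hunk is a classic polling-loop fix: the status register must be sampled before the deadline check, otherwise a loop that expires on its final pass breaks out with a stale (or never-initialised) hw_state. A runnable toy model of the corrected ordering, with fake_read() standing in for mvsd_read(MVSD_HW_STATE):

#include <stdio.h>

/* Pretend hardware that reports FIFO_EMPTY (bit 13) on the 3rd read. */
static unsigned int fake_read(void)
{
	static int n;
	return ++n >= 3 ? (1u << 13) : 0;
}

int main(void)
{
	unsigned int hw_state;
	int budget = 3;		/* stand-in for the jiffies deadline */

	do {
		hw_state = fake_read();		/* sample first... */
		if (--budget < 0) {
			puts("timed out");	/* ...so this sees fresh state */
			break;
		}
	} while (!(hw_state & (1u << 13)));

	printf("FIFO_EMPTY=%d\n", !!(hw_state & (1u << 13)));
	return 0;
}
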
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index f7199c83f5cf..ed1cb93c3784 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -124,9 +124,8 @@ enum mxcmci_type {
struct mxcmci_host {
struct mmc_host *mmc;
- struct resource *res;
void __iomem *base;
- int irq;
+ dma_addr_t phys_base;
int detect_irq;
struct dma_chan *dma;
struct dma_async_tx_descriptor *desc;
@@ -154,8 +153,6 @@ struct mxcmci_host {
struct work_struct datawork;
spinlock_t lock;
- struct regulator *vcc;
-
int burstlen;
int dmareq;
struct dma_slave_config dma_slave_config;
@@ -241,37 +238,15 @@ static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
-static inline void mxcmci_init_ocr(struct mxcmci_host *host)
-{
- host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
-
- if (IS_ERR(host->vcc)) {
- host->vcc = NULL;
- } else {
- host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
- if (host->pdata && host->pdata->ocr_avail)
- dev_warn(mmc_dev(host->mmc),
- "pdata->ocr_avail will not be used\n");
- }
-
- if (host->vcc == NULL) {
- /* fall-back to platform data */
- if (host->pdata && host->pdata->ocr_avail)
- host->mmc->ocr_avail = host->pdata->ocr_avail;
- else
- host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
- }
-}
-
-static inline void mxcmci_set_power(struct mxcmci_host *host,
- unsigned char power_mode,
- unsigned int vdd)
+static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
{
- if (host->vcc) {
- if (power_mode == MMC_POWER_UP)
- mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
- else if (power_mode == MMC_POWER_OFF)
- mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ if (host->power_mode == MMC_POWER_UP)
+ mmc_regulator_set_ocr(host->mmc,
+ host->mmc->supply.vmmc, vdd);
+ else if (host->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc,
+ host->mmc->supply.vmmc, 0);
}
if (host->pdata && host->pdata->setpower)
@@ -299,7 +274,6 @@ static void mxcmci_softreset(struct mxcmci_host *host)
mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
}
-static int mxcmci_setup_dma(struct mmc_host *mmc);
#if IS_ENABLED(CONFIG_PPC_MPC512x)
static inline void buffer_swap32(u32 *buf, int len)
@@ -868,8 +842,8 @@ static int mxcmci_setup_dma(struct mmc_host *mmc)
struct mxcmci_host *host = mmc_priv(mmc);
struct dma_slave_config *config = &host->dma_slave_config;
- config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
- config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
+ config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
config->dst_addr_width = 4;
config->src_addr_width = 4;
config->dst_maxburst = host->burstlen;
@@ -911,8 +885,8 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
if (host->power_mode != ios->power_mode) {
- mxcmci_set_power(host, ios->power_mode, ios->vdd);
host->power_mode = ios->power_mode;
+ mxcmci_set_power(host, ios->vdd);
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMD_DAT_CONT_INIT;
@@ -1040,8 +1014,8 @@ static const struct mmc_host_ops mxcmci_ops = {
static int mxcmci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
- struct mxcmci_host *host = NULL;
- struct resource *iores, *r;
+ struct mxcmci_host *host;
+ struct resource *res;
int ret = 0, irq;
bool dat3_card_detect = false;
dma_cap_mask_t mask;
@@ -1052,21 +1026,25 @@ static int mxcmci_probe(struct platform_device *pdev)
of_id = of_match_device(mxcmci_of_match, &pdev->dev);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!iores || irq < 0)
+ if (irq < 0)
return -EINVAL;
- r = request_mem_region(iores->start, resource_size(iores), pdev->name);
- if (!r)
- return -EBUSY;
+ mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
- mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out_release_mem;
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto out_free;
}
+ host->phys_base = res->start;
+
ret = mmc_of_parse(mmc);
if (ret)
goto out_free;
@@ -1084,13 +1062,6 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
- if (!host->base) {
- ret = -ENOMEM;
- goto out_free;
- }
-
if (of_id) {
const struct platform_device_id *id_entry = of_id->data;
host->devtype = id_entry->driver_data;
@@ -1112,7 +1083,14 @@ static int mxcmci_probe(struct platform_device *pdev)
&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
dat3_card_detect = true;
- mxcmci_init_ocr(host);
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret) {
+ if (pdata && ret != -EPROBE_DEFER)
+ mmc->ocr_avail = pdata->ocr_avail ? :
+ MMC_VDD_32_33 | MMC_VDD_33_34;
+ else
+ goto out_free;
+ }
if (dat3_card_detect)
host->default_irq_mask =
@@ -1120,19 +1098,16 @@ static int mxcmci_probe(struct platform_device *pdev)
else
host->default_irq_mask = 0;
- host->res = r;
- host->irq = irq;
-
host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(host->clk_ipg)) {
ret = PTR_ERR(host->clk_ipg);
- goto out_iounmap;
+ goto out_free;
}
host->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(host->clk_per)) {
ret = PTR_ERR(host->clk_per);
- goto out_iounmap;
+ goto out_free;
}
clk_prepare_enable(host->clk_per);
@@ -1159,9 +1134,9 @@ static int mxcmci_probe(struct platform_device *pdev)
if (!host->pdata) {
host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
} else {
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r) {
- host->dmareq = r->start;
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res) {
+ host->dmareq = res->start;
host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
host->dma_data.priority = DMA_PRIO_LOW;
host->dma_data.dma_request = host->dmareq;
@@ -1178,7 +1153,8 @@ static int mxcmci_probe(struct platform_device *pdev)
INIT_WORK(&host->datawork, mxcmci_datawork);
- ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
+ dev_name(&pdev->dev), host);
if (ret)
goto out_free_dma;
@@ -1188,7 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev)
ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
host->mmc);
if (ret)
- goto out_free_irq;
+ goto out_free_dma;
}
init_timer(&host->watchdog);
@@ -1199,20 +1175,17 @@ static int mxcmci_probe(struct platform_device *pdev)
return 0;
-out_free_irq:
- free_irq(host->irq, host);
out_free_dma:
if (host->dma)
dma_release_channel(host->dma);
+
out_clk_put:
clk_disable_unprepare(host->clk_per);
clk_disable_unprepare(host->clk_ipg);
-out_iounmap:
- iounmap(host->base);
+
out_free:
mmc_free_host(mmc);
-out_release_mem:
- release_mem_region(iores->start, resource_size(iores));
+
return ret;
}
@@ -1223,30 +1196,21 @@ static int mxcmci_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
- if (host->vcc)
- regulator_put(host->vcc);
-
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
- free_irq(host->irq, host);
- iounmap(host->base);
-
if (host->dma)
dma_release_channel(host->dma);
clk_disable_unprepare(host->clk_per);
clk_disable_unprepare(host->clk_ipg);
- release_mem_region(host->res->start, resource_size(host->res));
-
mmc_free_host(mmc);
return 0;
}
-#ifdef CONFIG_PM
-static int mxcmci_suspend(struct device *dev)
+static int __maybe_unused mxcmci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxcmci_host *host = mmc_priv(mmc);
@@ -1256,7 +1220,7 @@ static int mxcmci_suspend(struct device *dev)
return 0;
}
-static int mxcmci_resume(struct device *dev)
+static int __maybe_unused mxcmci_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxcmci_host *host = mmc_priv(mmc);
@@ -1266,11 +1230,7 @@ static int mxcmci_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops mxcmci_pm_ops = {
- .suspend = mxcmci_suspend,
- .resume = mxcmci_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
.probe = mxcmci_probe,
@@ -1279,9 +1239,7 @@ static struct platform_driver mxcmci_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &mxcmci_pm_ops,
-#endif
.of_match_table = mxcmci_of_match,
}
};
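
The mxcmmc PM conversion above is the now-standard idiom: tag the callbacks __maybe_unused so they compile away cleanly when CONFIG_PM_SLEEP is disabled, and let SIMPLE_DEV_PM_OPS() emit an appropriately empty dev_pm_ops in that case, removing both #ifdef CONFIG_PM blocks. Reduced to a fragment with placeholder demo_* names:

#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev) { return 0; }
static int __maybe_unused demo_resume(struct device *dev)  { return 0; }

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

/* ...and in the platform_driver, unconditionally: .pm = &demo_pm_ops */
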
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 073e871a0fc8..babfea03ba8a 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -70,6 +70,7 @@ struct mxs_mmc_host {
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
+ bool broken_cd;
};
static int mxs_mmc_get_cd(struct mmc_host *mmc)
@@ -78,6 +79,9 @@ static int mxs_mmc_get_cd(struct mmc_host *mmc)
struct mxs_ssp *ssp = &host->ssp;
int present, ret;
+ if (host->broken_cd)
+ return -ENOSYS;
+
ret = mmc_gpio_get_cd(mmc);
if (ret >= 0)
return ret;
@@ -568,6 +572,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
struct resource *iores;
@@ -634,6 +639,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+ host->broken_cd = of_property_read_bool(np, "broken-cd");
+
mmc->f_min = 400000;
mmc->f_max = 288000000;
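
host->broken_cd mirrors the standard "broken-cd" MMC devicetree property; returning -ENOSYS from .get_cd tells the core that card detection is simply unavailable, so it proceeds as if a card may be present rather than reporting an empty slot. A sketch of that contract, with an illustrative host type (mmc_gpio_get_cd() is from <linux/mmc/slot-gpio.h>):

struct demo_host { bool broken_cd; };

static int demo_get_cd(struct mmc_host *mmc)
{
	struct demo_host *host = mmc_priv(mmc);

	if (host->broken_cd)
		return -ENOSYS;	/* "can't tell", not "absent" */

	return mmc_gpio_get_cd(mmc);
}
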
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 5c2e58b29305..81974ecdfcbc 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -177,7 +177,7 @@ static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
unsigned long tick_ns;
if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
- tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
+ tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq);
ndelay(8 * tick_ns);
}
}
@@ -435,7 +435,7 @@ static void mmc_omap_send_stop_work(struct work_struct *work)
struct mmc_data *data = host->stop_data;
unsigned long tick_ns;
- tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
+ tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq);
ndelay(8*tick_ns);
mmc_omap_start_command(host, data->stop);
@@ -477,7 +477,7 @@ mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
u16 stat = 0;
/* Sending abort takes 80 clocks. Have some extra and round up */
- timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
+ timeout = DIV_ROUND_UP(120 * USEC_PER_SEC, slot->fclk_freq);
restarts = 0;
while (restarts < maxloops) {
OMAP_MMC_WRITE(host, STAT, 0xFFFF);
@@ -677,8 +677,8 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
if (n > host->buffer_bytes_left)
n = host->buffer_bytes_left;
- nwords = n / 2;
- nwords += n & 1; /* handle odd number of bytes to transfer */
+ /* Round up to handle odd number of bytes to transfer */
+ nwords = DIV_ROUND_UP(n, 2);
host->buffer_bytes_left -= n;
host->total_bytes_left -= n;
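
The three omap.c hunks replace hand-rolled round-up division with the kernel's DIV_ROUND_UP() and named time constants; the macro is just (n + d - 1) / d. A quick user-space check of the tick computation:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long fclk_freq = 48000000;	/* example 48 MHz functional clock */
	long tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, fclk_freq);

	printf("tick = %ld ns (1e9/48e6 = 20.83.. rounded up)\n", tick_ns);
	return 0;
}
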
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e91ee21549d0..6b7b75585926 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -31,7 +31,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
-#include <linux/omap-dma.h>
+#include <linux/omap-dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
@@ -582,7 +582,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
* - MMC/SD clock coming out of controller > 25MHz
*/
if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) &&
- (ios->timing != MMC_TIMING_UHS_DDR50) &&
+ (ios->timing != MMC_TIMING_MMC_DDR52) &&
((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
regval = OMAP_HSMMC_READ(host->base, HCTL);
if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
@@ -602,7 +602,7 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
u32 con;
con = OMAP_HSMMC_READ(host->base, CON);
- if (ios->timing == MMC_TIMING_UHS_DDR50)
+ if (ios->timing == MMC_TIMING_MMC_DDR52)
con |= DDR; /* configure in DDR mode */
else
con &= ~DDR;
@@ -920,16 +920,17 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
- host->cmd = NULL;
-
if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
!host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
+ host->cmd = NULL;
omap_hsmmc_start_dma_transfer(host);
omap_hsmmc_start_command(host, host->mrq->cmd,
host->mrq->data);
return;
}
+ host->cmd = NULL;
+
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* response type 2 */
@@ -1851,6 +1852,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
unsigned tx_req, rx_req;
struct pinctrl *pinctrl;
const struct omap_mmc_of_data *data;
+ void __iomem *base;
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
@@ -1881,9 +1883,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (res == NULL || irq < 0)
return -ENXIO;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (res == NULL)
- return -EBUSY;
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
ret = omap_hsmmc_gpio_init(pdata);
if (ret)
@@ -1904,7 +1906,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->irq = irq;
host->slot_id = 0;
host->mapbase = res->start + pdata->reg_offset;
- host->base = ioremap(host->mapbase, SZ_4K);
+ host->base = base + pdata->reg_offset;
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
host->pbias_enabled = 0;
@@ -1922,7 +1924,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
spin_lock_init(&host->irq_lock);
- host->fclk = clk_get(&pdev->dev, "fck");
+ host->fclk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(host->fclk)) {
ret = PTR_ERR(host->fclk);
host->fclk = NULL;
@@ -1941,7 +1943,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
- host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+ host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
/*
* MMC can still work without debounce clock.
*/
@@ -1949,7 +1951,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->dbclk = NULL;
} else if (clk_prepare_enable(host->dbclk) != 0) {
dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
- clk_put(host->dbclk);
host->dbclk = NULL;
}
@@ -2018,7 +2019,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
}
/* Request IRQ for MMC operations */
- ret = request_irq(host->irq, omap_hsmmc_irq, 0,
+ ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
if (ret) {
dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
@@ -2029,7 +2030,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (pdata->init(&pdev->dev) != 0) {
dev_err(mmc_dev(host->mmc),
"Unable to configure MMC IRQs\n");
- goto err_irq_cd_init;
+ goto err_irq;
}
}
@@ -2044,9 +2045,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
- ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
- NULL,
- omap_hsmmc_detect,
+ ret = devm_request_threaded_irq(&pdev->dev,
+ mmc_slot(host).card_detect_irq,
+ NULL, omap_hsmmc_detect,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
mmc_hostname(mmc), host);
if (ret) {
@@ -2089,15 +2090,12 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
err_slot_name:
mmc_remove_host(mmc);
- free_irq(mmc_slot(host).card_detect_irq, host);
err_irq_cd:
if (host->use_reg)
omap_hsmmc_reg_put(host);
err_reg:
if (host->pdata->cleanup)
host->pdata->cleanup(&pdev->dev);
-err_irq_cd_init:
- free_irq(host->irq, host);
err_irq:
if (host->tx_chan)
dma_release_channel(host->tx_chan);
@@ -2105,27 +2103,19 @@ err_irq:
dma_release_channel(host->rx_chan);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
- clk_put(host->fclk);
- if (host->dbclk) {
+ if (host->dbclk)
clk_disable_unprepare(host->dbclk);
- clk_put(host->dbclk);
- }
err1:
- iounmap(host->base);
mmc_free_host(mmc);
err_alloc:
omap_hsmmc_gpio_free(pdata);
err:
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
return ret;
}
static int omap_hsmmc_remove(struct platform_device *pdev)
{
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
- struct resource *res;
pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
@@ -2133,9 +2123,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
omap_hsmmc_reg_put(host);
if (host->pdata->cleanup)
host->pdata->cleanup(&pdev->dev);
- free_irq(host->irq, host);
- if (mmc_slot(host).card_detect_irq)
- free_irq(mmc_slot(host).card_detect_irq, host);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
@@ -2144,20 +2131,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
- clk_put(host->fclk);
- if (host->dbclk) {
+ if (host->dbclk)
clk_disable_unprepare(host->dbclk);
- clk_put(host->dbclk);
- }
omap_hsmmc_gpio_free(host->pdata);
- iounmap(host->base);
mmc_free_host(host->mmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
return 0;
}
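
Most of the omap_hsmmc churn above is the device-managed resource conversion: devm_ioremap_resource() claims and maps the region in one step, and devm_clk_get()/devm_request_irq() tie their releases to the device lifetime, which is why the err_irq_cd_init label, every free_irq()/iounmap()/clk_put(), and the release_mem_region() calls in remove could all be deleted. A hedged skeleton of the pattern, with placeholder demo_* names and error handling trimmed to the essentials:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *fclk;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* request + map */
	if (IS_ERR(base))
		return PTR_ERR(base);

	fclk = devm_clk_get(&pdev->dev, "fck");		/* no clk_put() */
	if (IS_ERR(fclk))
		return PTR_ERR(fclk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* released automatically on unbind; no free_irq() in remove */
	return devm_request_irq(&pdev->dev, irq, demo_irq, 0,
				dev_name(&pdev->dev), NULL);
}
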
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0b9ded13a3ae..0d519649b575 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -236,6 +236,9 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
case MMC_RSP_R1:
rsp_type = SD_RSP_TYPE_R1;
break;
+ case MMC_RSP_R1 & ~MMC_RSP_CRC:
+ rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
+ break;
case MMC_RSP_R1B:
rsp_type = SD_RSP_TYPE_R1b;
break;
@@ -816,6 +819,7 @@ static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
break;
+ case MMC_TIMING_MMC_DDR52:
case MMC_TIMING_UHS_DDR50:
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
0x0C | SD_ASYNC_FIFO_NOT_RST,
@@ -896,6 +900,7 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->vpclk = true;
host->double_clk = false;
break;
+ case MMC_TIMING_MMC_DDR52:
case MMC_TIMING_UHS_DDR50:
case MMC_TIMING_UHS_SDR25:
host->ssc_depth = RTSX_SSC_DEPTH_1M;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index e11fafa6fc6b..5d3766e792f0 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -34,7 +34,8 @@
#include <linux/mfd/rtsx_usb.h>
#include <asm/unaligned.h>
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
+ defined(CONFIG_MMC_REALTEK_USB_MODULE))
#include <linux/leds.h>
#include <linux/workqueue.h>
#define RTSX_USB_USE_LEDS_CLASS
@@ -59,7 +60,7 @@ struct rtsx_usb_sdmmc {
unsigned char power_mode;
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#ifdef RTSX_USB_USE_LEDS_CLASS
struct led_classdev led;
char led_name[32];
struct work_struct led_work;
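
The preprocessor condition above encodes link-time reachability: a built-in rtsx_usb_sdmmc must not call into a modular LED class, so the LED support is compiled only when LEDS_CLASS is built in, or when both it and this driver are modules. Later kernels provide IS_REACHABLE() for exactly this test; a sketch of the equivalent guard, assuming that macro is available:

/* IS_REACHABLE(CONFIG_FOO) is true if FOO is built-in, or if FOO is a
 * module and the current code is a module too, so the symbols link. */
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
#define RTSX_USB_USE_LEDS_CLASS
#endif
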
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index ebb3f392b589..8ce3c28cb76e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -102,11 +102,19 @@ static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
}
static const struct sdhci_ops sdhci_acpi_ops_dflt = {
+ .set_clock = sdhci_set_clock,
.enable_dma = sdhci_acpi_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_ops sdhci_acpi_ops_int = {
+ .set_clock = sdhci_set_clock,
.enable_dma = sdhci_acpi_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_acpi_int_hw_reset,
};
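
This and the following sdhci-* hunks are one mechanical sweep: the reworked sdhci core in this merge exports its standard clock, bus-width, reset, and UHS-signalling implementations and requires each host driver to name them explicitly in its ops table instead of relying on quirk-driven defaults. The minimal table for a driver with no special cases looks like this (sketch; the demo name is a placeholder):

static const struct sdhci_ops demo_sdhci_ops = {
	.set_clock	   = sdhci_set_clock,
	.set_bus_width	   = sdhci_set_bus_width,
	.reset		   = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};
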
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 6f166e63b817..dd780c315a63 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -206,9 +206,13 @@ static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host,
}
static struct sdhci_ops sdhci_bcm_kona_ops = {
+ .set_clock = sdhci_set_clock,
.get_max_clock = sdhci_bcm_kona_get_max_clk,
.get_timeout_clock = sdhci_bcm_kona_get_timeout_clock,
.platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
.card_event = sdhci_bcm_kona_card_event,
};
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index f6d8d67c545f..46af9a439d7b 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -131,8 +131,12 @@ static const struct sdhci_ops bcm2835_sdhci_ops = {
.read_l = bcm2835_sdhci_readl,
.read_w = bcm2835_sdhci_readw,
.read_b = bcm2835_sdhci_readb,
+ .set_clock = sdhci_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_min_clock = bcm2835_sdhci_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index f2cc26633cb2..14b74075589a 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -30,13 +30,12 @@ static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
u16 clk;
unsigned long timeout;
- if (clock == host->clock)
- return;
+ host->mmc->actual_clock = 0;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
- goto out;
+ return;
while (host->max_clk / div > clock) {
/*
@@ -75,13 +74,14 @@ static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-out:
- host->clock = clock;
}
static const struct sdhci_ops sdhci_cns3xxx_ops = {
.get_max_clock = sdhci_cns3xxx_get_max_clk,
.set_clock = sdhci_cns3xxx_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
@@ -90,8 +90,7 @@ static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_NONSTANDARD_CLOCK,
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
static int sdhci_cns3xxx_probe(struct platform_device *pdev)
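
For drivers that keep a custom ->set_clock(), the cns3xxx hunk shows the new contract: the callback owns the whole sequence, zeroes mmc->actual_clock up front, and simply returns once the clock is gated, since the core no longer tracks host->clock on its behalf (hence the dropped SDHCI_QUIRK_NONSTANDARD_CLOCK). A skeleton of a conforming callback, for illustration only:

static void demo_set_clock(struct sdhci_host *host, unsigned int clock)
{
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);	/* gate first */
	if (clock == 0)
		return;					/* stay gated */

	/* ...compute the divider, program SDHCI_CLOCK_CONTROL, wait for
	 * the internal clock to stabilise, then set SDHCI_CLOCK_CARD_EN. */
}
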
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 736d7a2eb7ec..e6278ec007d7 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -21,28 +21,17 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include "sdhci-pltfm.h"
struct sdhci_dove_priv {
struct clk *clk;
- int gpio_cd;
};
-static irqreturn_t sdhci_dove_carddetect_irq(int irq, void *data)
-{
- struct sdhci_host *host = data;
-
- tasklet_schedule(&host->card_tasklet);
- return IRQ_HANDLED;
-}
-
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
{
u16 ret;
@@ -60,8 +49,6 @@ static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_dove_priv *priv = pltfm_host->priv;
u32 ret;
ret = readl(host->ioaddr + reg);
@@ -71,14 +58,6 @@ static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
/* Mask the support for 3.0V */
ret &= ~SDHCI_CAN_VDD_300;
break;
- case SDHCI_PRESENT_STATE:
- if (gpio_is_valid(priv->gpio_cd)) {
- if (gpio_get_value(priv->gpio_cd) == 0)
- ret |= SDHCI_CARD_PRESENT;
- else
- ret &= ~SDHCI_CARD_PRESENT;
- }
- break;
}
return ret;
}
@@ -86,6 +65,10 @@ static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
static const struct sdhci_ops sdhci_dove_ops = {
.read_w = sdhci_dove_readw,
.read_l = sdhci_dove_readl,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_dove_pdata = {
@@ -113,28 +96,9 @@ static int sdhci_dove_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (pdev->dev.of_node) {
- priv->gpio_cd = of_get_named_gpio(pdev->dev.of_node,
- "cd-gpios", 0);
- } else {
- priv->gpio_cd = -EINVAL;
- }
-
- if (gpio_is_valid(priv->gpio_cd)) {
- ret = gpio_request(priv->gpio_cd, "sdhci-cd");
- if (ret) {
- dev_err(&pdev->dev, "card detect gpio request failed: %d\n",
- ret);
- return ret;
- }
- gpio_direction_input(priv->gpio_cd);
- }
-
host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0);
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto err_sdhci_pltfm_init;
- }
+ if (IS_ERR(host))
+ return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pltfm_host->priv = priv;
@@ -142,39 +106,20 @@ static int sdhci_dove_probe(struct platform_device *pdev)
if (!IS_ERR(priv->clk))
clk_prepare_enable(priv->clk);
- sdhci_get_of_property(pdev);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_sdhci_add;
ret = sdhci_add_host(host);
if (ret)
goto err_sdhci_add;
- /*
- * We must request the IRQ after sdhci_add_host(), as the tasklet only
- * gets setup in sdhci_add_host() and we oops.
- */
- if (gpio_is_valid(priv->gpio_cd)) {
- ret = request_irq(gpio_to_irq(priv->gpio_cd),
- sdhci_dove_carddetect_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- mmc_hostname(host->mmc), host);
- if (ret) {
- dev_err(&pdev->dev, "card detect irq request failed: %d\n",
- ret);
- goto err_request_irq;
- }
- }
-
return 0;
-err_request_irq:
- sdhci_remove_host(host, 0);
err_sdhci_add:
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
sdhci_pltfm_free(pdev);
-err_sdhci_pltfm_init:
- if (gpio_is_valid(priv->gpio_cd))
- gpio_free(priv->gpio_cd);
return ret;
}
@@ -186,11 +131,6 @@ static int sdhci_dove_remove(struct platform_device *pdev)
sdhci_pltfm_unregister(pdev);
- if (gpio_is_valid(priv->gpio_cd)) {
- free_irq(gpio_to_irq(priv->gpio_cd), host);
- gpio_free(priv->gpio_cd);
- }
-
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
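
The sdhci-dove rework is another mmc_of_parse() conversion: the core parses the standard bindings (cd-gpios, wp-gpios, bus-width, broken-cd, ...) and manages the card-detect GPIO and its interrupt itself, which is what makes the driver's hand-rolled gpio_request()/request_irq()/card_tasklet plumbing deletable. A fragment of the replacement path (the label name is illustrative):

	/* in probe, after sdhci_pltfm_init(): */
	ret = mmc_of_parse(host->mmc);	/* core takes over cd-gpios etc. */
	if (ret)
		goto err_free;
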
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index b841bb7cd371..ccec0e32590f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -160,7 +160,6 @@ struct pltfm_imx_data {
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
} multiblock_status;
- u32 uhs_mode;
u32 is_ddr;
};
@@ -382,7 +381,6 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
ret |= SDHCI_CTRL_TUNED_CLK;
- ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
return ret;
@@ -429,7 +427,6 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
else
new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
- imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
if (val & SDHCI_CTRL_TUNED_CLK)
@@ -600,12 +597,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
u32 temp, val;
if (clock == 0) {
+ host->mmc->actual_clock = 0;
+
if (esdhc_is_usdhc(imx_data)) {
val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
host->ioaddr + ESDHC_VENDOR_SPEC);
}
- goto out;
+ return;
}
if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
@@ -645,8 +644,6 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
}
mdelay(1);
-out:
- host->clock = clock;
}
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
@@ -668,7 +665,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
return -ENOSYS;
}
-static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
+static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
u32 ctrl;
@@ -686,8 +683,6 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
SDHCI_HOST_CONTROL);
-
- return 0;
}
static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
@@ -697,6 +692,7 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
/* FIXME: delay a bit for card to be ready for next tuning due to errors */
mdelay(1);
+ /* This is balanced by the runtime put in sdhci_tasklet_finish */
pm_runtime_get_sync(host->mmc->parent);
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
@@ -713,13 +709,12 @@ static void esdhc_request_done(struct mmc_request *mrq)
complete(&mrq->completion);
}
-static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
+static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
+ struct scatterlist *sg)
{
struct mmc_command cmd = {0};
struct mmc_request mrq = {NULL};
struct mmc_data data = {0};
- struct scatterlist sg;
- char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
cmd.opcode = opcode;
cmd.arg = 0;
@@ -728,11 +723,9 @@ static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
data.blocks = 1;
data.flags = MMC_DATA_READ;
- data.sg = &sg;
+ data.sg = sg;
data.sg_len = 1;
- sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
-
mrq.cmd = &cmd;
mrq.cmd->mrq = &mrq;
mrq.data = &data;
@@ -742,14 +735,12 @@ static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
mrq.done = esdhc_request_done;
init_completion(&(mrq.completion));
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irq(&host->lock);
host->mrq = &mrq;
sdhci_send_command(host, mrq.cmd);
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irq(&host->lock);
wait_for_completion(&mrq.completion);
@@ -772,13 +763,21 @@ static void esdhc_post_tuning(struct sdhci_host *host)
static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
{
+ struct scatterlist sg;
+ char *tuning_pattern;
int min, max, avg, ret;
+ tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
+ if (!tuning_pattern)
+ return -ENOMEM;
+
+ sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
+
	/* find the minimum delay first which can pass tuning */
min = ESDHC_TUNE_CTRL_MIN;
while (min < ESDHC_TUNE_CTRL_MAX) {
esdhc_prepare_tuning(host, min);
- if (!esdhc_send_tuning_cmd(host, opcode))
+ if (!esdhc_send_tuning_cmd(host, opcode, &sg))
break;
min += ESDHC_TUNE_CTRL_STEP;
}
@@ -787,7 +786,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
max = min + ESDHC_TUNE_CTRL_STEP;
while (max < ESDHC_TUNE_CTRL_MAX) {
esdhc_prepare_tuning(host, max);
- if (esdhc_send_tuning_cmd(host, opcode)) {
+ if (esdhc_send_tuning_cmd(host, opcode, &sg)) {
max -= ESDHC_TUNE_CTRL_STEP;
break;
}
@@ -797,9 +796,11 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
/* use average delay to get the best timing */
avg = (min + max) / 2;
esdhc_prepare_tuning(host, avg);
- ret = esdhc_send_tuning_cmd(host, opcode);
+ ret = esdhc_send_tuning_cmd(host, opcode, &sg);
esdhc_post_tuning(host);
+ kfree(tuning_pattern);
+
	dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
ret ? "failed" : "passed", avg, ret);
@@ -837,28 +838,21 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
}
-static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- switch (uhs) {
+ switch (timing) {
case MMC_TIMING_UHS_SDR12:
- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
- break;
case MMC_TIMING_UHS_SDR25:
- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
- break;
case MMC_TIMING_UHS_SDR50:
- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
- break;
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
break;
case MMC_TIMING_UHS_DDR50:
- imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
+ case MMC_TIMING_MMC_DDR52:
writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
ESDHC_MIX_CTRL_DDREN,
host->ioaddr + ESDHC_MIX_CTRL);
@@ -875,7 +869,15 @@ static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
break;
}
- return esdhc_change_pinstate(host, uhs);
+ esdhc_change_pinstate(host, timing);
+}
+
+static void esdhc_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static struct sdhci_ops sdhci_esdhc_ops = {
@@ -888,8 +890,9 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.get_max_clock = esdhc_pltfm_get_max_clock,
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_ro = esdhc_pltfm_get_ro,
- .platform_bus_width = esdhc_pltfm_bus_width,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
.set_uhs_signaling = esdhc_set_uhs_signaling,
+ .reset = esdhc_reset,
};
static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -1170,8 +1173,10 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
ret = sdhci_runtime_suspend_host(host);
- clk_disable_unprepare(imx_data->clk_per);
- clk_disable_unprepare(imx_data->clk_ipg);
+ if (!sdhci_sdio_irq_enabled(host)) {
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ }
clk_disable_unprepare(imx_data->clk_ahb);
return ret;
@@ -1183,8 +1188,10 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
- clk_prepare_enable(imx_data->clk_per);
- clk_prepare_enable(imx_data->clk_ipg);
+ if (!sdhci_sdio_irq_enabled(host)) {
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ }
clk_prepare_enable(imx_data->clk_ahb);
return sdhci_runtime_resume_host(host);
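
Two of the esdhc-imx fixes above deserve a note: the tuning loop's data buffer moved from the stack to kmalloc() because the buffer ends up in a scatterlist the host may DMA-map, and on-stack memory is not DMA-safe; and the disable_irq()/spin_lock() pair around the tuning command became a plain spin_lock_irq(). A sketch of the DMA-safe buffer setup, with a placeholder function name:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_alloc_tuning_buf(struct scatterlist *sg, size_t len)
{
	/* An on-stack array (the old code) must never be DMA-mapped; the
	 * scatterlist handed to the host may be, so the pattern buffer
	 * lives on the heap. The caller kfree()s it after tuning. */
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	sg_init_one(sg, buf, len);
	return 0;
}
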
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index a7d9f95a7b03..3497cfaf683c 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -20,10 +20,8 @@
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
SDHCI_QUIRK_NO_BUSY_IRQ | \
- SDHCI_QUIRK_NONSTANDARD_CLOCK | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
- SDHCI_QUIRK_PIO_NEEDS_DELAY | \
- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
+ SDHCI_QUIRK_PIO_NEEDS_DELAY)
#define ESDHC_SYSTEM_CONTROL 0x2c
#define ESDHC_CLOCK_MASK 0x0000fff0
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index acb0e9eb55f1..40573a58486a 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -468,6 +468,10 @@ MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static struct sdhci_ops sdhci_msm_ops = {
.platform_execute_tuning = sdhci_msm_execute_tuning,
+ .reset = sdhci_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static int sdhci_msm_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index f7c7cf62437d..5bd1092310f2 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -52,8 +52,12 @@ static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
}
static struct sdhci_ops sdhci_arasan_ops = {
+ .set_clock = sdhci_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_arasan_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static struct sdhci_pltfm_data sdhci_arasan_pdata = {
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 0b249970b119..8be4dcfb49a0 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -199,13 +199,14 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
-
int pre_div = 2;
int div = 1;
u32 temp;
+ host->mmc->actual_clock = 0;
+
if (clock == 0)
- goto out;
+ return;
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
@@ -238,24 +239,8 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
| (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
mdelay(1);
-out:
- host->clock = clock;
-}
-
-#ifdef CONFIG_PM
-static u32 esdhc_proctl;
-static void esdhc_of_suspend(struct sdhci_host *host)
-{
- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
}
-static void esdhc_of_resume(struct sdhci_host *host)
-{
- esdhc_of_enable_dma(host);
- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
-}
-#endif
-
static void esdhc_of_platform_init(struct sdhci_host *host)
{
u32 vvn;
@@ -269,7 +254,7 @@ static void esdhc_of_platform_init(struct sdhci_host *host)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
}
-static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
+static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
u32 ctrl;
@@ -289,8 +274,6 @@ static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
-
- return 0;
}
static const struct sdhci_ops sdhci_esdhc_ops = {
@@ -305,13 +288,46 @@ static const struct sdhci_ops sdhci_esdhc_ops = {
.get_max_clock = esdhc_of_get_max_clock,
.get_min_clock = esdhc_of_get_min_clock,
.platform_init = esdhc_of_platform_init,
-#ifdef CONFIG_PM
- .platform_suspend = esdhc_of_suspend,
- .platform_resume = esdhc_of_resume,
-#endif
.adma_workaround = esdhci_of_adma_workaround,
- .platform_bus_width = esdhc_pltfm_bus_width,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+#ifdef CONFIG_PM
+
+static u32 esdhc_proctl;
+static int esdhc_of_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
+
+ return sdhci_suspend_host(host);
+}
+
+static int esdhc_of_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret = sdhci_resume_host(host);
+
+ if (ret == 0) {
+ /* Isn't this already done by sdhci_resume_host() ? --rmk */
+ esdhc_of_enable_dma(host);
+ sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops esdhc_pmops = {
+ .suspend = esdhc_of_suspend,
+ .resume = esdhc_of_resume,
};
+#define ESDHC_PMOPS (&esdhc_pmops)
+#else
+#define ESDHC_PMOPS NULL
+#endif
static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
/*
@@ -374,7 +390,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = ESDHC_PMOPS,
},
.probe = sdhci_esdhc_probe,
.remove = sdhci_esdhc_remove,
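
The hunk above is the template for retiring the platform_suspend and
platform_resume sdhci_ops callbacks: the controller-specific save and
restore moves into an ordinary dev_pm_ops that brackets
sdhci_suspend_host()/sdhci_resume_host(). A condensed sketch of the
pattern under the same CONFIG_PM guard; the demo names are hypothetical:

#include <linux/device.h>
#include <linux/pm.h>
#include "sdhci.h"

#ifdef CONFIG_PM
static int demo_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	/* save controller-private state, then let the core suspend */
	return sdhci_suspend_host(host);
}

static int demo_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	/* on success, restore controller-private state here */
	return ret;
}

static const struct dev_pm_ops demo_pmops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};
#define DEMO_PMOPS (&demo_pmops)
#else
#define DEMO_PMOPS NULL
#endif
/* ...and the platform_driver sets .driver.pm = DEMO_PMOPS. */
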
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 57c514a81ca5..b341661369a2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -58,6 +58,10 @@ static const struct sdhci_ops sdhci_hlwd_ops = {
.write_l = sdhci_hlwd_writel,
.write_w = sdhci_hlwd_writew,
.write_b = sdhci_hlwd_writeb,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index f49666bcc52a..5670e381b0cf 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -21,6 +21,44 @@
#include "sdhci-pci.h"
#include "sdhci-pci-o2micro.h"
+static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
+{
+ u32 scratch_32;
+ pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= value;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+}
+
+static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u32 scratch_32;
+
+ /* Enable the LED for the SD host function */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0, &scratch_32);
+ if (ret)
+ return;
+
+ scratch_32 &= ~O2_SD_FREG0_LEDOFF;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_TEST_REG, &scratch_32);
+ if (ret)
+ return;
+
+ scratch_32 |= O2_SD_LED_ENABLE;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_TEST_REG, scratch_32);
+}
+
void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
{
u32 scratch_32;
@@ -216,6 +255,40 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ /* Support for DevId 8520 chips with subId 0x11 or 0x12 */
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) {
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0,
+ &scratch_32);
+ scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
+
+ /* Check whether subId is 0x11 or 0x12 */
+ if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) {
+ scratch_32 = 0x2c280000;
+
+ /* Set base clock to 208 MHz */
+ o2_pci_set_baseclk(chip, scratch_32);
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4,
+ &scratch_32);
+
+ /* Enable Base Clk setting change */
+ scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4,
+ scratch_32);
+
+ /* Set Tuning Window to 4 */
+ pci_write_config_byte(chip->pdev,
+ O2_SD_TUNING_CTRL, 0x44);
+
+ break;
+ }
+ }
+
+ /* Enable 8520 led function */
+ o2_pci_led_enable(chip);
+
/* Set timeout CLK */
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
@@ -276,7 +349,7 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
ret = pci_read_config_dword(chip->pdev,
- O2_SD_FUNC_REG0, &scratch_32);
+ O2_SD_PLL_SETTING, &scratch_32);
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
@@ -299,6 +372,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4, scratch_32);
}
+ /* Set tuning window to 5 */
+ pci_write_config_byte(chip->pdev,
+ O2_SD_TUNING_CTRL, 0x55);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
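
The helpers added above follow the usual PCI config-space
read-modify-write pattern; o2_pci_set_baseclk() in particular keeps the
low 16 bits of O2_SD_PLL_SETTING and replaces the upper half with the
new base-clock field. A standalone, compilable demonstration of that
bit manipulation (the initial register value here is invented):

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xDEAD5678;		/* pretend config-space readout */
	unsigned int value = 0x2c280000;	/* 208 MHz field from the hunk above */

	reg &= 0x0000FFFF;			/* preserve the low half */
	reg |= value;				/* install the clock field */

	printf("PLL setting -> 0x%08X\n", reg);	/* prints 0x2C285678 */
	return 0;
}
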
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
index dbec4c933488..f7ffc908d9a0 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ b/drivers/mmc/host/sdhci-pci-o2micro.h
@@ -57,6 +57,9 @@
#define O2_SD_UHS2_L1_CTRL 0x35C
#define O2_SD_FUNC_REG3 0x3E0
#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
#define O2_SD_VENDOR_SETTING 0x110
#define O2_SD_VENDOR_SETTING2 0x1C8
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index fdc612120362..52c42fcc284c 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1031,7 +1031,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -1052,8 +1052,6 @@ static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
}
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-
- return 0;
}
static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
@@ -1080,8 +1078,11 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host)
}
static const struct sdhci_ops sdhci_pci_ops = {
+ .set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
- .platform_bus_width = sdhci_pci_bus_width,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
};
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index bef250e95418..7e834fb78f42 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -45,6 +45,10 @@ unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
static const struct sdhci_ops sdhci_pltfm_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
#ifdef CONFIG_OF
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d51e061ec576..3c0f3c0a1cc8 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -51,11 +51,13 @@
#define MMC_CARD 0x1000
#define MMC_WIDTH 0x0100
-static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
+static void pxav2_reset(struct sdhci_host *host, u8 mask)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ sdhci_reset(host, mask);
+
if (mask == SDHCI_RESET_ALL) {
u16 tmp = 0;
@@ -88,7 +90,7 @@ static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
}
}
-static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
+static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
u16 tmp;
@@ -107,14 +109,14 @@ static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
}
writew(tmp, host->ioaddr + SD_CE_ATA_2);
writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
-
- return 0;
}
static const struct sdhci_ops pxav2_sdhci_ops = {
+ .set_clock = sdhci_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .platform_reset_exit = pxav2_set_private_registers,
- .platform_bus_width = pxav2_mmc_set_width,
+ .set_bus_width = pxav2_mmc_set_bus_width,
+ .reset = pxav2_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
#ifdef CONFIG_OF
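
pxav2_reset() above (and pxav3_reset() in the next file) shows how the
old platform_reset_exit() bodies survive the conversion: the private
register fixups are kept, but now run after an explicit call into the
library reset. The general shape of such a hook, sketched:

static void demo_reset(struct sdhci_host *host, u8 mask)
{
	/* the library performs the actual SOFTWARE_RESET sequence */
	sdhci_reset(host, mask);

	/* what used to live in platform_reset_exit(): redo any
	 * controller-private register state a full reset wipes out */
	if (mask == SDHCI_RESET_ALL) {
		/* driver-specific fixups go here */
	}
}
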
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 2fd73b38c303..f4f128947561 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -112,11 +112,13 @@ static int mv_conf_mbus_windows(struct platform_device *pdev,
return 0;
}
-static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
+static void pxav3_reset(struct sdhci_host *host, u8 mask)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ sdhci_reset(host, mask);
+
if (mask == SDHCI_RESET_ALL) {
/*
* tune timing of read data/command when crc error happen
@@ -184,7 +186,7 @@ static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
pxa->power_mode = power_mode;
}
-static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
{
u16 ctrl_2;
@@ -218,15 +220,15 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
dev_dbg(mmc_dev(host->mmc),
"%s uhs = %d, ctrl_2 = %04X\n",
__func__, uhs, ctrl_2);
-
- return 0;
}
static const struct sdhci_ops pxav3_sdhci_ops = {
- .platform_reset_exit = pxav3_set_private_registers,
+ .set_clock = sdhci_set_clock,
.set_uhs_signaling = pxav3_set_uhs_signaling,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = pxav3_reset,
};
static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index d61eb5a70833..fa5954a05449 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -33,9 +33,6 @@
#define MAX_BUS_CLK (4)
-/* Number of gpio's used is max data bus width + command and clock lines */
-#define NUM_GPIOS(x) (x + 2)
-
/**
* struct sdhci_s3c - S3C SDHCI instance
* @host: The SDHCI host created
@@ -58,6 +55,8 @@ struct sdhci_s3c {
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
unsigned long clk_rates[MAX_BUS_CLK];
+
+ bool no_divider;
};
/**
@@ -70,6 +69,7 @@ struct sdhci_s3c {
*/
struct sdhci_s3c_drv_data {
unsigned int sdhci_quirks;
+ bool no_divider;
};
static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
@@ -119,7 +119,7 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
* If controller uses a non-standard clock division, find the best clock
* speed possible with selected clock source and skip the division.
*/
- if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
+ if (ourhost->no_divider) {
rate = clk_round_rate(clksrc, wanted);
return wanted - rate;
}
@@ -161,9 +161,13 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
int src;
u32 ctrl;
+ host->mmc->actual_clock = 0;
+
/* don't bother if the clock is going off. */
- if (clock == 0)
+ if (clock == 0) {
+ sdhci_set_clock(host, clock);
return;
+ }
for (src = 0; src < MAX_BUS_CLK; src++) {
delta = sdhci_s3c_consider_clock(ourhost, src, clock);
@@ -215,6 +219,8 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock < 25 * 1000000)
ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
+
+ sdhci_set_clock(host, clock);
}
/**
@@ -295,10 +301,11 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
unsigned long timeout;
u16 clk = 0;
+ host->mmc->actual_clock = 0;
+
/* If the clock is going off, set to 0 at clock control register */
if (clock == 0) {
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- host->clock = clock;
return;
}
@@ -306,8 +313,6 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
- host->clock = clock;
-
clk = SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -329,14 +334,14 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
}
/**
- * sdhci_s3c_platform_bus_width - support 8bit buswidth
+ * sdhci_s3c_set_bus_width - support 8-bit bus width
* @host: The SDHCI host being queried
* @width: MMC_BUS_WIDTH_ macro for the bus width being requested
*
* The hardware supports an 8-bit bus width even though it is not
* a v3 controller, so set_bus_width() is provided to enable it.
*/
-static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
+static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -358,93 +363,23 @@ static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
}
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-
- return 0;
}
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
- .platform_bus_width = sdhci_s3c_platform_bus_width,
+ .set_bus_width = sdhci_s3c_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
-static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
-{
- struct sdhci_host *host = platform_get_drvdata(dev);
-#ifdef CONFIG_PM_RUNTIME
- struct sdhci_s3c *sc = sdhci_priv(host);
-#endif
- unsigned long flags;
-
- if (host) {
- spin_lock_irqsave(&host->lock, flags);
- if (state) {
- dev_dbg(&dev->dev, "card inserted.\n");
-#ifdef CONFIG_PM_RUNTIME
- clk_prepare_enable(sc->clk_io);
-#endif
- host->flags &= ~SDHCI_DEVICE_DEAD;
- host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- } else {
- dev_dbg(&dev->dev, "card removed.\n");
- host->flags |= SDHCI_DEVICE_DEAD;
- host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
-#ifdef CONFIG_PM_RUNTIME
- clk_disable_unprepare(sc->clk_io);
-#endif
- }
- tasklet_schedule(&host->card_tasklet);
- spin_unlock_irqrestore(&host->lock, flags);
- }
-}
-
-static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id)
-{
- struct sdhci_s3c *sc = dev_id;
- int status = gpio_get_value(sc->ext_cd_gpio);
- if (sc->pdata->ext_cd_gpio_invert)
- status = !status;
- sdhci_s3c_notify_change(sc->pdev, status);
- return IRQ_HANDLED;
-}
-
-static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
-{
- struct s3c_sdhci_platdata *pdata = sc->pdata;
- struct device *dev = &sc->pdev->dev;
-
- if (devm_gpio_request(dev, pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) {
- sc->ext_cd_gpio = pdata->ext_cd_gpio;
- sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
- if (sc->ext_cd_irq &&
- request_threaded_irq(sc->ext_cd_irq, NULL,
- sdhci_s3c_gpio_card_detect_thread,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- dev_name(dev), sc) == 0) {
- int status = gpio_get_value(sc->ext_cd_gpio);
- if (pdata->ext_cd_gpio_invert)
- status = !status;
- sdhci_s3c_notify_change(sc->pdev, status);
- } else {
- dev_warn(dev, "cannot request irq for card detect\n");
- sc->ext_cd_irq = 0;
- }
- } else {
- dev_err(dev, "cannot request gpio for card detect\n");
- }
-}
-
#ifdef CONFIG_OF
static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
{
struct device_node *node = dev->of_node;
- struct sdhci_s3c *ourhost = to_s3c(host);
u32 max_width;
- int gpio;
/* if the bus-width property is not specified, assume width as 1 */
if (of_property_read_u32(node, "bus-width", &max_width))
@@ -462,18 +397,8 @@ static int sdhci_s3c_parse_dt(struct device *dev,
return 0;
}
- gpio = of_get_named_gpio(node, "cd-gpios", 0);
- if (gpio_is_valid(gpio)) {
- pdata->cd_type = S3C_SDHCI_CD_GPIO;
- pdata->ext_cd_gpio = gpio;
- ourhost->ext_cd_gpio = -1;
- if (of_get_property(node, "cd-inverted", NULL))
- pdata->ext_cd_gpio_invert = 1;
+ if (of_get_named_gpio(node, "cd-gpios", 0))
return 0;
- } else if (gpio != -ENOENT) {
- dev_err(dev, "invalid card detect gpio specified\n");
- return -EINVAL;
- }
/* assuming internal card detect that will be configured by pinctrl */
pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
@@ -606,8 +531,10 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
/* Setup quirks for the controller */
host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
- if (drv_data)
+ if (drv_data) {
host->quirks |= drv_data->sdhci_quirks;
+ sc->no_divider = drv_data->no_divider;
+ }
#ifndef CONFIG_MMC_SDHCI_S3C_DMA
@@ -656,7 +583,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
* If controller does not have internal clock divider,
* we can use overriding functions instead of default.
*/
- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
+ if (sc->no_divider) {
sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
@@ -674,6 +601,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);
+ mmc_of_parse(host->mmc);
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
@@ -682,15 +611,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_req_regs;
}
- /* The following two methods of card detection might call
- sdhci_s3c_notify_change() immediately, so they can be called
- only after sdhci_add_host(). Setup errors are ignored. */
- if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
- pdata->ext_cd_init(&sdhci_s3c_notify_change);
- if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
- gpio_is_valid(pdata->ext_cd_gpio))
- sdhci_s3c_setup_card_detect_gpio(sc);
-
#ifdef CONFIG_PM_RUNTIME
if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
clk_disable_unprepare(sc->clk_io);
@@ -711,16 +631,12 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
- struct s3c_sdhci_platdata *pdata = sc->pdata;
-
- if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup)
- pdata->ext_cd_cleanup(&sdhci_s3c_notify_change);
if (sc->ext_cd_irq)
free_irq(sc->ext_cd_irq, sc);
#ifdef CONFIG_PM_RUNTIME
- if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
clk_prepare_enable(sc->clk_io);
#endif
sdhci_remove_host(host, 1);
@@ -797,7 +713,7 @@ static const struct dev_pm_ops sdhci_s3c_pmops = {
#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
- .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
+ .no_divider = true,
};
#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
#else
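
sdhci_s3c_consider_clock(), touched above, scores each clock source by
how far short of the requested frequency it falls, and
sdhci_s3c_set_clock() picks the source with the smallest shortfall. A
standalone, compilable illustration of that selection; the source rates
are invented and clk_round_rate() is replaced by a clamped stub:

#include <stdio.h>

#define MAX_BUS_CLK 4

static unsigned long stub_round_rate(int src, unsigned long wanted)
{
	/* stand-in for clk_round_rate(): fixed per-source maximums */
	static const unsigned long rates[MAX_BUS_CLK] =
		{ 12000000, 25000000, 50000000, 100000000 };
	return rates[src] < wanted ? rates[src] : wanted;
}

int main(void)
{
	unsigned long wanted = 40000000;	/* 40 MHz requested */
	unsigned long best_delta = ~0UL;
	int src, best_src = -1;

	for (src = 0; src < MAX_BUS_CLK; src++) {
		unsigned long delta = wanted - stub_round_rate(src, wanted);

		if (delta < best_delta) {
			best_delta = delta;
			best_src = src;
		}
	}
	printf("best source %d (shortfall %lu Hz)\n", best_src, best_delta);
	return 0;
}
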
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 696122c1b468..17004531d089 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -28,7 +28,11 @@ static unsigned int sdhci_sirf_get_max_clk(struct sdhci_host *host)
}
static struct sdhci_ops sdhci_sirf_ops = {
+ .set_clock = sdhci_set_clock,
.get_max_clock = sdhci_sirf_get_max_clk,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
static struct sdhci_pltfm_data sdhci_sirf_pdata = {
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 0316dec3f006..9d535c7336ef 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -38,7 +38,10 @@ struct spear_sdhci {
/* sdhci ops */
static const struct sdhci_ops sdhci_pltfm_ops = {
- /* Nothing to do for now. */
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
};
#ifdef CONFIG_OF
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index a835898a68dd..d93a063a36f3 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,11 +32,17 @@
/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
+#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
+#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
+#define NVQUIRK_DISABLE_SDR50 BIT(3)
+#define NVQUIRK_DISABLE_SDR104 BIT(4)
+#define NVQUIRK_DISABLE_DDR50 BIT(5)
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
@@ -48,19 +54,6 @@ struct sdhci_tegra {
int power_gpio;
};
-static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
-{
- u32 val;
-
- if (unlikely(reg == SDHCI_PRESENT_STATE)) {
- /* Use wp_gpio here instead? */
- val = readl(host->ioaddr + reg);
- return val | SDHCI_WRITE_PROTECT;
- }
-
- return readl(host->ioaddr + reg);
-}
-
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -108,26 +101,33 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
return mmc_gpio_get_ro(host->mmc);
}
-static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
+static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+ u32 misc_ctrl;
+
+ sdhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
+ misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
/* Erratum: Enable SDHCI spec v3.00 support */
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
- u32 misc_ctrl;
-
- misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
- sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
- }
+ /* Don't advertise UHS modes which aren't supported yet */
+ if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50)
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
+ if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50)
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
+ if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104)
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
+ sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
}
-static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
+static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
{
u32 ctrl;
@@ -144,23 +144,25 @@ static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
ctrl &= ~SDHCI_CTRL_4BITBUS;
}
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
- return 0;
}
static const struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
- .read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
- .platform_bus_width = tegra_sdhci_buswidth,
- .platform_reset_exit = tegra_sdhci_reset_exit,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = tegra_sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
};
static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &tegra_sdhci_ops,
};
@@ -175,13 +177,16 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &tegra_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
- .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
+ .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
+ NVQUIRK_DISABLE_SDR50 |
+ NVQUIRK_DISABLE_SDR104,
};
static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
@@ -189,12 +194,16 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &tegra_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
+ .nvquirks = NVQUIRK_DISABLE_SDR50 |
+ NVQUIRK_DISABLE_DDR50 |
+ NVQUIRK_DISABLE_SDR104,
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
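
tegra_sdhci_reset() above folds the old reset_exit hook into the new
.reset callback and uses it to stop advertising UHS modes the SoC
cannot run yet. The bit arithmetic, extracted into a standalone program
using the constants from the hunk (the SPEC_300 quirk is left out for
brevity):

#include <stdio.h>

#define ENABLE_SDR104	0x8
#define ENABLE_SDR50	0x10
#define ENABLE_DDR50	0x200

#define NVQUIRK_DISABLE_SDR50	(1u << 3)
#define NVQUIRK_DISABLE_SDR104	(1u << 4)
#define NVQUIRK_DISABLE_DDR50	(1u << 5)

int main(void)
{
	/* pretend MISC_CTRL advertised all three modes */
	unsigned int misc_ctrl = ENABLE_SDR104 | ENABLE_SDR50 | ENABLE_DDR50;
	/* Tegra30's UHS-disable quirks from the diff */
	unsigned int nvquirks = NVQUIRK_DISABLE_SDR50 | NVQUIRK_DISABLE_SDR104;

	if (nvquirks & NVQUIRK_DISABLE_SDR50)
		misc_ctrl &= ~ENABLE_SDR50;
	if (nvquirks & NVQUIRK_DISABLE_DDR50)
		misc_ctrl &= ~ENABLE_DDR50;
	if (nvquirks & NVQUIRK_DISABLE_SDR104)
		misc_ctrl &= ~ENABLE_SDR104;

	printf("misc_ctrl = 0x%03X\n", misc_ctrl);	/* 0x200: DDR50 only */
	return 0;
}
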
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9a79fc4b60ca..47055f3f01b8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -44,6 +44,8 @@
#define MAX_TUNING_LOOP 40
+#define ADMA_SIZE ((128 * 2 + 1) * 4)
+
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
@@ -131,43 +133,26 @@ static void sdhci_dumpregs(struct sdhci_host *host)
* *
\*****************************************************************************/
-static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
-{
- u32 ier;
-
- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
- ier &= ~clear;
- ier |= set;
- sdhci_writel(host, ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
-}
-
-static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
-{
- sdhci_clear_set_irqs(host, 0, irqs);
-}
-
-static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
-{
- sdhci_clear_set_irqs(host, irqs, 0);
-}
-
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
- u32 present, irqs;
+ u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
(host->mmc->caps & MMC_CAP_NONREMOVABLE))
return;
- present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT;
- irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
+ if (enable) {
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
- if (enable)
- sdhci_unmask_irqs(host, irqs);
- else
- sdhci_mask_irqs(host, irqs);
+ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT;
+ } else {
+ host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+ }
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
@@ -180,22 +165,9 @@ static void sdhci_disable_card_detection(struct sdhci_host *host)
sdhci_set_card_detection(host, false);
}
-static void sdhci_reset(struct sdhci_host *host, u8 mask)
+void sdhci_reset(struct sdhci_host *host, u8 mask)
{
unsigned long timeout;
- u32 uninitialized_var(ier);
-
- if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT))
- return;
- }
-
- if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
-
- if (host->ops->platform_reset_enter)
- host->ops->platform_reset_enter(host, mask);
sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
@@ -220,16 +192,27 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
timeout--;
mdelay(1);
}
+}
+EXPORT_SYMBOL_GPL(sdhci_reset);
+
+static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
+{
+ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
+ if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT))
+ return;
+ }
- if (host->ops->platform_reset_exit)
- host->ops->platform_reset_exit(host, mask);
+ host->ops->reset(host, mask);
- if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
- sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
+ if (mask & SDHCI_RESET_ALL) {
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma)
+ host->ops->enable_dma(host);
+ }
- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
- if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
- host->ops->enable_dma(host);
+ /* Resetting the controller clears many settings, including the preset enable */
+ host->preset_enabled = false;
}
}
@@ -238,15 +221,18 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
static void sdhci_init(struct sdhci_host *host, int soft)
{
if (soft)
- sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
else
- sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
- sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
- SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
- SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
- SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
- SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
+ host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
+ SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
+ SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
+ SDHCI_INT_RESPONSE;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
if (soft) {
/* force clock reconfiguration */
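
From this point sdhci.c stops reading SDHCI_INT_ENABLE back and treats
the cached host->ier as the single source of truth, mirroring it into
both enable registers on every change. The removed
sdhci_clear_set_irqs() could be re-expressed against that cache as a
one-liner; a hypothetical helper (not added by the patch) capturing the
invariant:

static void demo_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
{
	/* update the cache, then mirror it into both registers */
	host->ier = (host->ier & ~clear) | set;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
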
@@ -502,11 +488,6 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
else
direction = DMA_TO_DEVICE;
- /*
- * The ADMA descriptor table is mapped further down as we
- * need to fill it with data first.
- */
-
host->align_addr = dma_map_single(mmc_dev(host->mmc),
host->align_buffer, 128 * 4, direction);
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
@@ -567,7 +548,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
* If this triggers then we have a calculation bug
* somewhere. :/
*/
- WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
+ WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
}
if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
@@ -595,17 +576,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_addr, 128 * 4, direction);
}
- host->adma_addr = dma_map_single(mmc_dev(host->mmc),
- host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
- if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
- goto unmap_entries;
- BUG_ON(host->adma_addr & 0x3);
-
return 0;
-unmap_entries:
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, direction);
unmap_align:
dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
128 * 4, direction);
@@ -623,19 +595,25 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
u8 *align;
char *buffer;
unsigned long flags;
+ bool has_unaligned;
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
direction = DMA_TO_DEVICE;
- dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
- (128 * 2 + 1) * 4, DMA_TO_DEVICE);
-
dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
128 * 4, direction);
- if (data->flags & MMC_DATA_READ) {
+ /* Do a quick scan of the SG list for any unaligned mappings */
+ has_unaligned = false;
+ for_each_sg(data->sg, sg, host->sg_count, i)
+ if (sg_dma_address(sg) & 3) {
+ has_unaligned = true;
+ break;
+ }
+
+ if (has_unaligned && data->flags & MMC_DATA_READ) {
dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
data->sg_len, direction);
@@ -721,9 +699,12 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
if (host->flags & SDHCI_REQ_USE_DMA)
- sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
+ host->ier = (host->ier & ~pio_irqs) | dma_irqs;
else
- sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
+ host->ier = (host->ier & ~dma_irqs) | pio_irqs;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
@@ -976,8 +957,8 @@ static void sdhci_finish_data(struct sdhci_host *host)
* upon error conditions.
*/
if (data->error) {
- sdhci_reset(host, SDHCI_RESET_CMD);
- sdhci_reset(host, SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
}
sdhci_send_command(host, data->stop);
@@ -1107,24 +1088,23 @@ static void sdhci_finish_command(struct sdhci_host *host)
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
- u16 ctrl, preset = 0;
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ u16 preset = 0;
- switch (ctrl & SDHCI_CTRL_UHS_MASK) {
- case SDHCI_CTRL_UHS_SDR12:
+ switch (host->timing) {
+ case MMC_TIMING_UHS_SDR12:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
break;
- case SDHCI_CTRL_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR25:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
break;
- case SDHCI_CTRL_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR50:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
break;
- case SDHCI_CTRL_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
break;
- case SDHCI_CTRL_UHS_DDR50:
+ case MMC_TIMING_UHS_DDR50:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
break;
default:
@@ -1136,32 +1116,22 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
return preset;
}
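
sdhci_get_preset_value() now keys off the cached host->timing instead
of reading the UHS mode back from HOST_CONTROL2, and gains an HS200
case that shares the SDR104 preset register. The mapping as a
standalone program; the MMC_TIMING_* stand-ins are local, and the
offsets are the SDHCI 3.00 preset-value registers:

#include <stdio.h>

#define SDHCI_PRESET_FOR_SDR12	0x66
#define SDHCI_PRESET_FOR_SDR25	0x68
#define SDHCI_PRESET_FOR_SDR50	0x6A
#define SDHCI_PRESET_FOR_SDR104	0x6C
#define SDHCI_PRESET_FOR_DDR50	0x6E

/* local stand-ins for the MMC_TIMING_* enumerators */
enum { T_SDR12, T_SDR25, T_SDR50, T_SDR104, T_HS200, T_DDR50 };

static unsigned int preset_reg(int timing)
{
	switch (timing) {
	case T_SDR12:	return SDHCI_PRESET_FOR_SDR12;
	case T_SDR25:	return SDHCI_PRESET_FOR_SDR25;
	case T_SDR50:	return SDHCI_PRESET_FOR_SDR50;
	case T_SDR104:
	case T_HS200:	return SDHCI_PRESET_FOR_SDR104;	/* HS200 shares SDR104 */
	case T_DDR50:	return SDHCI_PRESET_FOR_DDR50;
	default:	return 0;
	}
}

int main(void)
{
	printf("HS200 preset register: 0x%02X\n", preset_reg(T_HS200));
	return 0;
}
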
-static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div = 0; /* Initialized for compiler warning */
int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
- if (clock && clock == host->clock)
- return;
-
host->mmc->actual_clock = 0;
- if (host->ops->set_clock) {
- host->ops->set_clock(host, clock);
- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
- return;
- }
-
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
- goto out;
+ return;
if (host->version >= SDHCI_SPEC_300) {
- if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
- SDHCI_CTRL_PRESET_VAL_ENABLE) {
+ if (host->preset_enabled) {
u16 pre_val;
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -1247,26 +1217,16 @@ clock_set:
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-
-out:
- host->clock = clock;
}
+EXPORT_SYMBOL_GPL(sdhci_set_clock);
-static inline void sdhci_update_clock(struct sdhci_host *host)
-{
- unsigned int clock;
-
- clock = host->clock;
- host->clock = 0;
- sdhci_set_clock(host, clock);
-}
-
-static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
+static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
u8 pwr = 0;
- if (power != (unsigned short)-1) {
- switch (1 << power) {
+ if (mode != MMC_POWER_OFF) {
+ switch (1 << vdd) {
case MMC_VDD_165_195:
pwr = SDHCI_POWER_180;
break;
@@ -1284,7 +1244,7 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
}
if (host->pwr == pwr)
- return -1;
+ return;
host->pwr = pwr;
@@ -1292,38 +1252,43 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
- return 0;
- }
-
- /*
- * Spec says that we should clear the power reg before setting
- * a new value. Some controllers don't seem to like this though.
- */
- if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
- sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ vdd = 0;
+ } else {
+ /*
+ * Spec says that we should clear the power reg before setting
+ * a new value. Some controllers don't seem to like this though.
+ */
+ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- /*
- * At least the Marvell CaFe chip gets confused if we set the voltage
- * and set turn on power at the same time, so set the voltage first.
- */
- if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ /*
+ * At least the Marvell CaFe chip gets confused if we set the
+ * voltage and turn on the power at the same time, so set the
+ * voltage first.
+ */
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- pwr |= SDHCI_POWER_ON;
+ pwr |= SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
- sdhci_runtime_pm_bus_on(host);
+ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ sdhci_runtime_pm_bus_on(host);
- /*
- * Some controllers need an extra 10ms delay of 10ms before they
- * can apply clock after applying power
- */
- if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
- mdelay(10);
+ /*
+ * Some controllers need an extra 10ms delay before they
+ * can apply clock after applying power
+ */
+ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+ mdelay(10);
+ }
- return power;
+ if (host->vmmc) {
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd);
+ spin_lock_irq(&host->lock);
+ }
}
/*****************************************************************************\
@@ -1427,10 +1392,53 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+void sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
+
+void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+{
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
+
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
unsigned long flags;
- int vdd_bit = -1;
u8 ctrl;
spin_lock_irqsave(&host->lock, flags);
@@ -1456,45 +1464,17 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
- sdhci_set_clock(host, ios->clock);
-
- if (ios->power_mode == MMC_POWER_OFF)
- vdd_bit = sdhci_set_power(host, -1);
- else
- vdd_bit = sdhci_set_power(host, ios->vdd);
-
- if (host->vmmc && vdd_bit != -1) {
- spin_unlock_irqrestore(&host->lock, flags);
- mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
- spin_lock_irqsave(&host->lock, flags);
+ if (!ios->clock || ios->clock != host->clock) {
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
}
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
- /*
- * If your platform has 8-bit width support but is not a v3 controller,
- * or if it requires special setup code, you should implement that in
- * platform_bus_width().
- */
- if (host->ops->platform_bus_width) {
- host->ops->platform_bus_width(host, ios->bus_width);
- } else {
- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- if (ios->bus_width == MMC_BUS_WIDTH_8) {
- ctrl &= ~SDHCI_CTRL_4BITBUS;
- if (host->version >= SDHCI_SPEC_300)
- ctrl |= SDHCI_CTRL_8BITBUS;
- } else {
- if (host->version >= SDHCI_SPEC_300)
- ctrl &= ~SDHCI_CTRL_8BITBUS;
- if (ios->bus_width == MMC_BUS_WIDTH_4)
- ctrl |= SDHCI_CTRL_4BITBUS;
- else
- ctrl &= ~SDHCI_CTRL_4BITBUS;
- }
- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
- }
+ host->ops->set_bus_width(host, ios->bus_width);
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
@@ -1510,19 +1490,20 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
/* In case of UHS-I modes, set High Speed Enable */
if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+ (ios->timing == MMC_TIMING_MMC_DDR52) ||
(ios->timing == MMC_TIMING_UHS_SDR50) ||
(ios->timing == MMC_TIMING_UHS_SDR104) ||
(ios->timing == MMC_TIMING_UHS_DDR50) ||
(ios->timing == MMC_TIMING_UHS_SDR25))
ctrl |= SDHCI_CTRL_HISPD;
- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+ if (!host->preset_enabled) {
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/*
* We only need to set Driver Strength if the
* preset value enable is not set.
*/
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
@@ -1546,7 +1527,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
- sdhci_update_clock(host);
+ host->ops->set_clock(host, host->clock);
}
@@ -1555,25 +1536,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
clk &= ~SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
- if (host->ops->set_uhs_signaling)
- host->ops->set_uhs_signaling(host, ios->timing);
- else {
- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- /* Select Bus Speed Mode for host */
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- if ((ios->timing == MMC_TIMING_MMC_HS200) ||
- (ios->timing == MMC_TIMING_UHS_SDR104))
- ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
- else if (ios->timing == MMC_TIMING_UHS_SDR12)
- ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- else if (ios->timing == MMC_TIMING_UHS_SDR25)
- ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
- else if (ios->timing == MMC_TIMING_UHS_SDR50)
- ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
- else if (ios->timing == MMC_TIMING_UHS_DDR50)
- ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
- sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
- }
+ host->ops->set_uhs_signaling(host, ios->timing);
+ host->timing = ios->timing;
if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
((ios->timing == MMC_TIMING_UHS_SDR12) ||
@@ -1590,7 +1554,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
/* Re-enable SD Clock */
- sdhci_update_clock(host);
+ host->ops->set_clock(host, host->clock);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -1600,7 +1564,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
* it on each ios seems to solve the problem.
*/
if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
- sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
@@ -1709,24 +1673,16 @@ static int sdhci_get_ro(struct mmc_host *mmc)
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- if (enable)
- host->flags |= SDHCI_SDIO_IRQ_ENABLED;
- else
- host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
-
- /* SDIO IRQ will be enabled as appropriate in runtime resume */
- if (host->runtime_suspended)
- goto out;
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+ if (enable)
+ host->ier |= SDHCI_INT_CARD_INT;
+ else
+ host->ier &= ~SDHCI_INT_CARD_INT;
- if (enable)
- sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
- else
- sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
-out:
- mmiowb();
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ mmiowb();
+ }
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1734,9 +1690,18 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ sdhci_runtime_pm_get(host);
+
spin_lock_irqsave(&host->lock, flags);
+ if (enable)
+ host->flags |= SDHCI_SDIO_IRQ_ENABLED;
+ else
+ host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
+
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_runtime_pm_put(host);
}
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
@@ -1855,22 +1820,15 @@ static int sdhci_card_busy(struct mmc_host *mmc)
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
- struct sdhci_host *host;
+ struct sdhci_host *host = mmc_priv(mmc);
u16 ctrl;
- u32 ier;
int tuning_loop_counter = MAX_TUNING_LOOP;
- unsigned long timeout;
int err = 0;
- bool requires_tuning_nonuhs = false;
unsigned long flags;
- host = mmc_priv(mmc);
-
sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-
/*
* The Host Controller needs tuning only in case of SDR104 mode
* and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1878,15 +1836,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
* If the Host Controller supports the HS200 mode then the
* tuning function has to be executed.
*/
- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
- (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
- host->flags & SDHCI_SDR104_NEEDS_TUNING))
- requires_tuning_nonuhs = true;
-
- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
- requires_tuning_nonuhs)
- ctrl |= SDHCI_CTRL_EXEC_TUNING;
- else {
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ break;
+
+ case MMC_TIMING_UHS_SDR50:
+ if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+ host->flags & SDHCI_SDR104_NEEDS_TUNING)
+ break;
+ /* FALLTHROUGH */
+
+ default:
spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return 0;
@@ -1899,6 +1860,8 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
return err;
}
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/*
@@ -1911,21 +1874,17 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
* to make sure we don't hit a controller bug, we _only_
* enable Buffer Read Ready interrupt here.
*/
- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
- sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
/*
* Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
* of loops reaches 40 times.
*/
- timeout = 150;
do {
struct mmc_command cmd = {0};
struct mmc_request mrq = {NULL};
- if (!tuning_loop_counter && !timeout)
- break;
-
cmd.opcode = opcode;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -1933,6 +1892,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
cmd.data = NULL;
cmd.error = 0;
+ if (tuning_loop_counter-- == 0)
+ break;
+
mrq.cmd = &cmd;
host->mrq = &mrq;
@@ -1990,26 +1952,25 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
host->tuning_done = 0;
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- tuning_loop_counter--;
- timeout--;
- mdelay(1);
+
+ /* eMMC spec does not require a delay between tuning cycles */
+ if (opcode == MMC_SEND_TUNING_BLOCK)
+ mdelay(1);
} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
/*
* The Host Driver has exhausted the maximum number of loops allowed,
* so use fixed sampling frequency.
*/
- if (!tuning_loop_counter || !timeout) {
+ if (tuning_loop_counter < 0) {
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ }
+ if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
+ pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
err = -EIO;
- } else {
- if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
- pr_info(DRIVER_NAME ": Tuning procedure"
- " failed, falling back to fixed sampling"
- " clock\n");
- err = -EIO;
- }
}
out:
@@ -2044,7 +2005,8 @@ out:
if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
err = 0;
- sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
@@ -2054,26 +2016,30 @@ out:
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
- u16 ctrl;
-
/* Host Controller v3.00 defines preset value registers */
if (host->version < SDHCI_SPEC_300)
return;
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-
/*
* We only enable or disable Preset Value if they are not already
* enabled or disabled respectively. Otherwise, we bail out.
*/
- if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
- ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- host->flags |= SDHCI_PV_ENABLED;
- } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
- ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+ if (host->preset_enabled != enable) {
+ u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ if (enable)
+ ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
+ else
+ ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- host->flags &= ~SDHCI_PV_ENABLED;
+
+ if (enable)
+ host->flags |= SDHCI_PV_ENABLED;
+ else
+ host->flags &= ~SDHCI_PV_ENABLED;
+
+ host->preset_enabled = enable;
}
}
@@ -2095,8 +2061,8 @@ static void sdhci_card_event(struct mmc_host *mmc)
pr_err("%s: Resetting controller.\n",
mmc_hostname(host->mmc));
- sdhci_reset(host, SDHCI_RESET_CMD);
- sdhci_reset(host, SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
@@ -2124,15 +2090,6 @@ static const struct mmc_host_ops sdhci_ops = {
* *
\*****************************************************************************/
-static void sdhci_tasklet_card(unsigned long param)
-{
- struct sdhci_host *host = (struct sdhci_host*)param;
-
- sdhci_card_event(host->mmc);
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
-}
-
static void sdhci_tasklet_finish(unsigned long param)
{
struct sdhci_host *host;
@@ -2169,12 +2126,12 @@ static void sdhci_tasklet_finish(unsigned long param)
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
- sdhci_update_clock(host);
+ host->ops->set_clock(host, host->clock);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
- sdhci_reset(host, SDHCI_RESET_CMD);
- sdhci_reset(host, SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
}
host->mrq = NULL;
@@ -2424,101 +2381,94 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
- irqreturn_t result;
+ irqreturn_t result = IRQ_NONE;
struct sdhci_host *host = dev_id;
- u32 intmask, unexpected = 0;
- int cardint = 0, max_loops = 16;
+ u32 intmask, mask, unexpected = 0;
+ int max_loops = 16;
spin_lock(&host->lock);
- if (host->runtime_suspended) {
+ if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
spin_unlock(&host->lock);
return IRQ_NONE;
}
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
-
if (!intmask || intmask == 0xffffffff) {
result = IRQ_NONE;
goto out;
}
-again:
- DBG("*** %s got interrupt: 0x%08x\n",
- mmc_hostname(host->mmc), intmask);
-
- if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
- u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT;
-
- /*
- * There is a observation on i.mx esdhc. INSERT bit will be
- * immediately set again when it gets cleared, if a card is
- * inserted. We have to mask the irq to prevent interrupt
- * storm which will freeze the system. And the REMOVE gets
- * the same situation.
- *
- * More testing are needed here to ensure it works for other
- * platforms though.
- */
- sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
- SDHCI_INT_CARD_REMOVE);
- sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
- SDHCI_INT_CARD_INSERT);
-
- sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
- SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
- intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
- tasklet_schedule(&host->card_tasklet);
- }
-
- if (intmask & SDHCI_INT_CMD_MASK) {
- sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
- SDHCI_INT_STATUS);
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
- }
+ do {
+ /* Clear selected interrupts. */
+ mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
+ SDHCI_INT_BUS_POWER);
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
- if (intmask & SDHCI_INT_DATA_MASK) {
- sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
- SDHCI_INT_STATUS);
- sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
- }
+ DBG("*** %s got interrupt: 0x%08x\n",
+ mmc_hostname(host->mmc), intmask);
- intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
+ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
- intmask &= ~SDHCI_INT_ERROR;
+ /*
+ * There is an observation on i.mx esdhc: the INSERT
+ * bit will be immediately set again when it gets
+ * cleared, if a card is inserted. We have to mask
+ * the irq to prevent an interrupt storm which would
+ * freeze the system. The REMOVE bit gets into the
+ * same situation.
+ *
+ * More testing is needed here to ensure it works
+ * for other platforms though.
+ */
+ host->ier &= ~(SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_CARD_REMOVE);
+ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT;
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+
+ host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_CARD_REMOVE);
+ result = IRQ_WAKE_THREAD;
+ }
- if (intmask & SDHCI_INT_BUS_POWER) {
- pr_err("%s: Card is consuming too much power!\n",
- mmc_hostname(host->mmc));
- sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
- }
+ if (intmask & SDHCI_INT_CMD_MASK)
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
- intmask &= ~SDHCI_INT_BUS_POWER;
+ if (intmask & SDHCI_INT_DATA_MASK)
+ sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
- if (intmask & SDHCI_INT_CARD_INT)
- cardint = 1;
+ if (intmask & SDHCI_INT_BUS_POWER)
+ pr_err("%s: Card is consuming too much power!\n",
+ mmc_hostname(host->mmc));
- intmask &= ~SDHCI_INT_CARD_INT;
+ if (intmask & SDHCI_INT_CARD_INT) {
+ sdhci_enable_sdio_irq_nolock(host, false);
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
+ }
- if (intmask) {
- unexpected |= intmask;
- sdhci_writel(host, intmask, SDHCI_INT_STATUS);
- }
+ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
+ SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
+ SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
+ SDHCI_INT_CARD_INT);
- result = IRQ_HANDLED;
+ if (intmask) {
+ unexpected |= intmask;
+ sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+ }
- intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ if (result == IRQ_NONE)
+ result = IRQ_HANDLED;
- /*
- * If we know we'll call the driver to signal SDIO IRQ, disregard
- * further indications of Card Interrupt in the status to avoid a
- * needless loop.
- */
- if (cardint)
- intmask &= ~SDHCI_INT_CARD_INT;
- if (intmask && --max_loops)
- goto again;
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ } while (intmask && --max_loops);
out:
spin_unlock(&host->lock);
@@ -2527,15 +2477,38 @@ out:
mmc_hostname(host->mmc), unexpected);
sdhci_dumpregs(host);
}
- /*
- * We have to delay this as it calls back into the driver.
- */
- if (cardint)
- mmc_signal_sdio_irq(host->mmc);
return result;
}
+static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
+{
+ struct sdhci_host *host = dev_id;
+ unsigned long flags;
+ u32 isr;
+
+ spin_lock_irqsave(&host->lock, flags);
+ isr = host->thread_isr;
+ host->thread_isr = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ sdhci_card_event(host->mmc);
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ }
+
+ if (isr & SDHCI_INT_CARD_INT) {
+ sdio_run_irqs(host->mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return isr ? IRQ_HANDLED : IRQ_NONE;
+}
+
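The hunks above replace the card tasklet and the mmc_signal_sdio_irq() deferral with the kernel's threaded-IRQ model: sdhci_irq() acks the hardware, latches the slow work into host->thread_isr and returns IRQ_WAKE_THREAD, and sdhci_thread_irq() then runs in sleepable context. A minimal sketch of that handoff pattern (all names here are hypothetical, not sdhci's):

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct demo_host {
            spinlock_t lock;
            u32 thread_isr;                 /* bits deferred to the thread */
    };

    static irqreturn_t demo_hardirq(int irq, void *dev_id)
    {
            struct demo_host *host = dev_id;
            u32 status = 0;                 /* read-and-ack hardware here */

            if (!status)
                    return IRQ_NONE;        /* not ours (shared line) */

            spin_lock(&host->lock);
            host->thread_isr |= status;     /* latch for the thread */
            spin_unlock(&host->lock);
            return IRQ_WAKE_THREAD;         /* wakes demo_thread() */
    }

    static irqreturn_t demo_thread(int irq, void *dev_id)
    {
            struct demo_host *host = dev_id;
            unsigned long flags;
            u32 isr;

            spin_lock_irqsave(&host->lock, flags);
            isr = host->thread_isr;
            host->thread_isr = 0;
            spin_unlock_irqrestore(&host->lock, flags);

            /* sleepable processing of isr goes here */
            return isr ? IRQ_HANDLED : IRQ_NONE;
    }

    /* registered with:
     *   request_threaded_irq(irq, demo_hardirq, demo_thread,
     *                        IRQF_SHARED, "demo", host);
     */

This split is presumably also why the runtime-suspend hunk further down moves from synchronize_irq() to synchronize_hardirq(): only the hard handler needs to be quiesced there, while waiting for the threaded part could block.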
/*****************************************************************************\
* *
* Suspend/resume *
@@ -2572,9 +2545,6 @@ EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
- if (host->ops->platform_suspend)
- host->ops->platform_suspend(host);
-
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
@@ -2584,7 +2554,9 @@ int sdhci_suspend_host(struct sdhci_host *host)
}
if (!device_may_wakeup(mmc_dev(host->mmc))) {
- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ host->ier = 0;
+ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
} else {
sdhci_enable_irq_wakeups(host);
@@ -2605,8 +2577,9 @@ int sdhci_resume_host(struct sdhci_host *host)
}
if (!device_may_wakeup(mmc_dev(host->mmc))) {
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
+ ret = request_threaded_irq(host->irq, sdhci_irq,
+ sdhci_thread_irq, IRQF_SHARED,
+ mmc_hostname(host->mmc), host);
if (ret)
return ret;
} else {
@@ -2628,9 +2601,6 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_enable_card_detection(host);
- if (host->ops->platform_resume)
- host->ops->platform_resume(host);
-
/* Set the re-tuning expiration flag */
if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
@@ -2682,10 +2652,12 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
}
spin_lock_irqsave(&host->lock, flags);
- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ host->ier &= SDHCI_INT_CARD_INT;
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
spin_unlock_irqrestore(&host->lock, flags);
- synchronize_irq(host->irq);
+ synchronize_hardirq(host->irq);
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = true;
@@ -2729,7 +2701,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
host->runtime_suspended = false;
/* Enable SDIO IRQ */
- if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
sdhci_enable_sdio_irq_nolock(host, true);
/* Enable Card Detection */
@@ -2788,7 +2760,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (debug_quirks2)
host->quirks2 = debug_quirks2;
- sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (host->version & SDHCI_SPEC_VER_MASK)
@@ -2848,15 +2820,29 @@ int sdhci_add_host(struct sdhci_host *host)
* (128) and potentially one alignment transfer for
* each of those entries.
*/
- host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
+ host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
+ ADMA_SIZE, &host->adma_addr,
+ GFP_KERNEL);
host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
if (!host->adma_desc || !host->align_buffer) {
- kfree(host->adma_desc);
+ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
+ host->adma_desc, host->adma_addr);
kfree(host->align_buffer);
pr_warning("%s: Unable to allocate ADMA "
"buffers. Falling back to standard DMA.\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
+ host->adma_desc = NULL;
+ host->align_buffer = NULL;
+ } else if (host->adma_addr & 3) {
+ pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
+ host->adma_desc, host->adma_addr);
+ kfree(host->align_buffer);
+ host->adma_desc = NULL;
+ host->align_buffer = NULL;
}
}
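Switching the ADMA descriptor table from kmalloc() to dma_alloc_coherent() gives the driver the device-visible address (host->adma_addr) directly and avoids streaming-DMA syncs; the price is that every error path must free with the identical size and DMA handle, and that the returned address has to be checked against the controller's 4-byte alignment requirement. A condensed sketch of that allocate/validate/free pairing (function name hypothetical; dev and ADMA_SIZE as in the hunk above):

    static void *demo_alloc_adma(struct device *dev, dma_addr_t *dma)
    {
            void *desc = dma_alloc_coherent(dev, ADMA_SIZE, dma, GFP_KERNEL);

            if (desc && (*dma & 3)) {       /* not 4-byte aligned */
                    dma_free_coherent(dev, ADMA_SIZE, desc, *dma);
                    desc = NULL;            /* caller falls back to SDMA */
            }
            return desc;
    }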
@@ -2941,6 +2927,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
@@ -3212,8 +3199,6 @@ int sdhci_add_host(struct sdhci_host *host)
/*
* Init tasklets.
*/
- tasklet_init(&host->card_tasklet,
- sdhci_tasklet_card, (unsigned long)host);
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
@@ -3230,8 +3215,8 @@ int sdhci_add_host(struct sdhci_host *host)
sdhci_init(host, 0);
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(mmc), host);
+ ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
+ IRQF_SHARED, mmc_hostname(mmc), host);
if (ret) {
pr_err("%s: Failed to request IRQ %d: %d\n",
mmc_hostname(mmc), host->irq, ret);
@@ -3273,12 +3258,12 @@ int sdhci_add_host(struct sdhci_host *host)
#ifdef SDHCI_USE_LEDS_CLASS
reset:
- sdhci_reset(host, SDHCI_RESET_ALL);
- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
#endif
untasklet:
- tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
return ret;
@@ -3315,14 +3300,14 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
#endif
if (!dead)
- sdhci_reset(host, SDHCI_RESET_ALL);
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
del_timer_sync(&host->timer);
- tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
if (host->vmmc) {
@@ -3335,7 +3320,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
regulator_put(host->vqmmc);
}
- kfree(host->adma_desc);
+ if (host->adma_desc)
+ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
+ host->adma_desc, host->adma_addr);
kfree(host->align_buffer);
host->adma_desc = NULL;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a3ed01887db..4a5cd5e3fa3e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -281,18 +281,14 @@ struct sdhci_ops {
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
- int (*platform_bus_width)(struct sdhci_host *host,
- int width);
+ void (*set_bus_width)(struct sdhci_host *host, int width);
void (*platform_send_init_74_clocks)(struct sdhci_host *host,
u8 power_mode);
unsigned int (*get_ro)(struct sdhci_host *host);
- void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
- void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
+ void (*reset)(struct sdhci_host *host, u8 mask);
int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
- int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
- void (*platform_suspend)(struct sdhci_host *host);
- void (*platform_resume)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
void (*platform_init)(struct sdhci_host *host);
void (*card_event)(struct sdhci_host *host);
@@ -397,6 +393,16 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
extern void sdhci_send_command(struct sdhci_host *host,
struct mmc_command *cmd);
+static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
+{
+ return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
+}
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_bus_width(struct sdhci_host *host, int width);
+void sdhci_reset(struct sdhci_host *host, u8 mask);
+void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_resume_host(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 54730f4aac87..656fbba4c422 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -803,12 +803,13 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
break;
}
switch (host->timing) {
- case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
/*
* MMC core will only set this timing, if the host
- * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
- * implementations with this capability, e.g. sh73a0,
- * will have to set it in their platform data.
+ * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
+ * capability. MMCIF implementations with this
+ * capability, e.g. sh73a0, will have to set it
+ * in their platform data.
*/
tmp |= CMD_SET_DARS;
break;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
new file mode 100644
index 000000000000..f0a39eb049af
--- /dev/null
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -0,0 +1,1847 @@
+/*
+ * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/virtio.h>
+#include <linux/workqueue.h>
+
+#define USDHI6_SD_CMD 0x0000
+#define USDHI6_SD_PORT_SEL 0x0004
+#define USDHI6_SD_ARG 0x0008
+#define USDHI6_SD_STOP 0x0010
+#define USDHI6_SD_SECCNT 0x0014
+#define USDHI6_SD_RSP10 0x0018
+#define USDHI6_SD_RSP32 0x0020
+#define USDHI6_SD_RSP54 0x0028
+#define USDHI6_SD_RSP76 0x0030
+#define USDHI6_SD_INFO1 0x0038
+#define USDHI6_SD_INFO2 0x003c
+#define USDHI6_SD_INFO1_MASK 0x0040
+#define USDHI6_SD_INFO2_MASK 0x0044
+#define USDHI6_SD_CLK_CTRL 0x0048
+#define USDHI6_SD_SIZE 0x004c
+#define USDHI6_SD_OPTION 0x0050
+#define USDHI6_SD_ERR_STS1 0x0058
+#define USDHI6_SD_ERR_STS2 0x005c
+#define USDHI6_SD_BUF0 0x0060
+#define USDHI6_SDIO_MODE 0x0068
+#define USDHI6_SDIO_INFO1 0x006c
+#define USDHI6_SDIO_INFO1_MASK 0x0070
+#define USDHI6_CC_EXT_MODE 0x01b0
+#define USDHI6_SOFT_RST 0x01c0
+#define USDHI6_VERSION 0x01c4
+#define USDHI6_HOST_MODE 0x01c8
+#define USDHI6_SDIF_MODE 0x01cc
+
+#define USDHI6_SD_CMD_APP 0x0040
+#define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000
+#define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300
+#define USDHI6_SD_CMD_MODE_RSP_R1 0x0400 /* Also R5, R6, R7 */
+#define USDHI6_SD_CMD_MODE_RSP_R1B 0x0500 /* R1b */
+#define USDHI6_SD_CMD_MODE_RSP_R2 0x0600
+#define USDHI6_SD_CMD_MODE_RSP_R3 0x0700 /* Also R4 */
+#define USDHI6_SD_CMD_DATA 0x0800
+#define USDHI6_SD_CMD_READ 0x1000
+#define USDHI6_SD_CMD_MULTI 0x2000
+#define USDHI6_SD_CMD_CMD12_AUTO_OFF 0x4000
+
+#define USDHI6_CC_EXT_MODE_SDRW BIT(1)
+
+#define USDHI6_SD_INFO1_RSP_END BIT(0)
+#define USDHI6_SD_INFO1_ACCESS_END BIT(2)
+#define USDHI6_SD_INFO1_CARD_OUT BIT(3)
+#define USDHI6_SD_INFO1_CARD_IN BIT(4)
+#define USDHI6_SD_INFO1_CD BIT(5)
+#define USDHI6_SD_INFO1_WP BIT(7)
+#define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8)
+#define USDHI6_SD_INFO1_D3_CARD_IN BIT(9)
+
+#define USDHI6_SD_INFO2_CMD_ERR BIT(0)
+#define USDHI6_SD_INFO2_CRC_ERR BIT(1)
+#define USDHI6_SD_INFO2_END_ERR BIT(2)
+#define USDHI6_SD_INFO2_TOUT BIT(3)
+#define USDHI6_SD_INFO2_IWA_ERR BIT(4)
+#define USDHI6_SD_INFO2_IRA_ERR BIT(5)
+#define USDHI6_SD_INFO2_RSP_TOUT BIT(6)
+#define USDHI6_SD_INFO2_SDDAT0 BIT(7)
+#define USDHI6_SD_INFO2_BRE BIT(8)
+#define USDHI6_SD_INFO2_BWE BIT(9)
+#define USDHI6_SD_INFO2_SCLKDIVEN BIT(13)
+#define USDHI6_SD_INFO2_CBSY BIT(14)
+#define USDHI6_SD_INFO2_ILA BIT(15)
+
+#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
+#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
+#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
+#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)
+
+#define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR | \
+ USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \
+ USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \
+ USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \
+ USDHI6_SD_INFO2_ILA)
+
+#define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
+ USDHI6_SD_INFO1_CARD)
+
+#define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
+ USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)
+
+#define USDHI6_SD_CLK_CTRL_SCLKEN BIT(8)
+
+#define USDHI6_SD_STOP_STP BIT(0)
+#define USDHI6_SD_STOP_SEC BIT(8)
+
+#define USDHI6_SDIO_INFO1_IOIRQ BIT(0)
+#define USDHI6_SDIO_INFO1_EXPUB52 BIT(14)
+#define USDHI6_SDIO_INFO1_EXWT BIT(15)
+
+#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13)
+
+#define USDHI6_SOFT_RST_RESERVED (BIT(1) | BIT(2))
+#define USDHI6_SOFT_RST_RESET BIT(0)
+
+#define USDHI6_SD_OPTION_TIMEOUT_SHIFT 4
+#define USDHI6_SD_OPTION_TIMEOUT_MASK (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
+#define USDHI6_SD_OPTION_WIDTH_1 BIT(15)
+
+#define USDHI6_SD_PORT_SEL_PORTS_SHIFT 8
+
+#define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff
+
+#define USDHI6_SDIO_INFO1_IRQ (USDHI6_SDIO_INFO1_IOIRQ | 3 | \
+ USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)
+
+#define USDHI6_MIN_DMA 64
+
+enum usdhi6_wait_for {
+ USDHI6_WAIT_FOR_REQUEST,
+ USDHI6_WAIT_FOR_CMD,
+ USDHI6_WAIT_FOR_MREAD,
+ USDHI6_WAIT_FOR_MWRITE,
+ USDHI6_WAIT_FOR_READ,
+ USDHI6_WAIT_FOR_WRITE,
+ USDHI6_WAIT_FOR_DATA_END,
+ USDHI6_WAIT_FOR_STOP,
+ USDHI6_WAIT_FOR_DMA,
+};
+
+struct usdhi6_page {
+ struct page *page;
+ void *mapped; /* mapped page */
+};
+
+struct usdhi6_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ void __iomem *base;
+ struct clk *clk;
+
+ /* SG memory handling */
+
+ /* Common for multiple and single block requests */
+ struct usdhi6_page pg; /* current page from an SG */
+ void *blk_page; /* either a mapped page, or the bounce buffer */
+ size_t offset; /* offset within a page, including sg->offset */
+
+ /* Blocks crossing a page boundary */
+ size_t head_len;
+ struct usdhi6_page head_pg;
+
+ /* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
+ struct scatterlist bounce_sg;
+ u8 bounce_buf[512];
+
+ /* Multiple block requests only */
+ struct scatterlist *sg; /* current SG segment */
+ int page_idx; /* page index within an SG segment */
+
+ enum usdhi6_wait_for wait;
+ u32 status_mask;
+ u32 status2_mask;
+ u32 sdio_mask;
+ u32 io_error;
+ u32 irq_status;
+ unsigned long imclk;
+ unsigned long rate;
+ bool app_cmd;
+
+ /* Timeout handling */
+ struct delayed_work timeout_work;
+ unsigned long timeout;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ bool dma_active;
+};
+
+/* I/O primitives */
+
+static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
+{
+ iowrite32(data, host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+}
+
+static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
+{
+ iowrite16(data, host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+}
+
+static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
+{
+ u32 data = ioread32(host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+ return data;
+}
+
+static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
+{
+ u16 data = ioread16(host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+ return data;
+}
+
+static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
+{
+ host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
+ host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
+ usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
+ usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
+}
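Note the inverted semantics: in the USDHI6_SD_INFO*_MASK registers a set bit disables the source, so usdhi6_irq_enable() writes the complement of the wanted bits within the supported-IRQ set. An illustrative call (a simplified variant of usdhi6_wait_for_resp() below):

    /* Unmask only response-end in INFO1 plus the INFO2 error bits;
     * internally status_mask becomes USDHI6_SD_INFO1_IRQ & ~RSP_END: */
    usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END, USDHI6_SD_INFO2_ERR);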
+
+static void usdhi6_wait_for_resp(struct usdhi6_host *host)
+{
+ usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
+ USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
+ USDHI6_SD_INFO2_ERR);
+}
+
+static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
+{
+ usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
+ USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
+ (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
+}
+
+static void usdhi6_only_cd(struct usdhi6_host *host)
+{
+ /* Mask all except card hotplug */
+ usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
+}
+
+static void usdhi6_mask_all(struct usdhi6_host *host)
+{
+ usdhi6_irq_enable(host, 0, 0);
+}
+
+static int usdhi6_error_code(struct usdhi6_host *host)
+{
+ u32 err;
+
+ usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
+
+ if (host->io_error &
+ (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
+ u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
+ int opc = host->mrq ? host->mrq->cmd->opcode : -1;
+
+ err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
+ /* Response timeout is often normal, don't spam the log */
+ if (host->wait == USDHI6_WAIT_FOR_CMD)
+ dev_dbg(mmc_dev(host->mmc),
+ "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
+ err, rsp54, host->wait, opc);
+ else
+ dev_warn(mmc_dev(host->mmc),
+ "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
+ err, rsp54, host->wait, opc);
+ return -ETIMEDOUT;
+ }
+
+ err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
+ if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
+ dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
+ err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
+ if (host->io_error & USDHI6_SD_INFO2_ILA)
+ return -EILSEQ;
+
+ return -EIO;
+}
+
+/* Scatter-Gather management */
+
+/*
+ * In PIO mode we have to map each page separately, using kmap(). That way
+ * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
+ * have to use a bounce buffer for blocks crossing page boundaries. Such blocks
+ * have been observed with an SDIO WiFi card (b43 driver).
+ */
+static void usdhi6_blk_bounce(struct usdhi6_host *host,
+ struct scatterlist *sg)
+{
+ struct mmc_data *data = host->mrq->data;
+ size_t blk_head = host->head_len;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
+ __func__, host->mrq->cmd->opcode, data->sg_len,
+ data->blksz, data->blocks, sg->offset);
+
+ host->head_pg.page = host->pg.page;
+ host->head_pg.mapped = host->pg.mapped;
+ host->pg.page = nth_page(host->pg.page, 1);
+ host->pg.mapped = kmap(host->pg.page);
+
+ host->blk_page = host->bounce_buf;
+ host->offset = 0;
+
+ if (data->flags & MMC_DATA_READ)
+ return;
+
+ memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
+ blk_head);
+ memcpy(host->bounce_buf + blk_head, host->pg.mapped,
+ data->blksz - blk_head);
+}
+
+/* Only called for multiple block IO */
+static void usdhi6_sg_prep(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
+
+ host->sg = data->sg;
+ /* TODO: if we always map, this is redundant */
+ host->offset = host->sg->offset;
+}
+
+/* Map the first page in an SG segment: common for multiple and single block IO */
+static void *usdhi6_sg_map(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
+ size_t head = PAGE_SIZE - sg->offset;
+ size_t blk_head = head % data->blksz;
+
+ WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
+ if (WARN(sg_dma_len(sg) % data->blksz,
+ "SG size %u isn't a multiple of block size %u\n",
+ sg_dma_len(sg), data->blksz))
+ return NULL;
+
+ host->pg.page = sg_page(sg);
+ host->pg.mapped = kmap(host->pg.page);
+ host->offset = sg->offset;
+
+ /*
+ * Block size must be a power of 2 for multi-block transfers,
+ * therefore blk_head is equal for all pages in this SG
+ */
+ host->head_len = blk_head;
+
+ if (head < data->blksz)
+ /*
+ * The first block in the SG crosses a page boundary.
+ * Max blksz = 512, so blocks can only span 2 pages
+ */
+ usdhi6_blk_bounce(host, sg);
+ else
+ host->blk_page = host->pg.mapped;
+
+ dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
+ host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
+ sg->offset, host->mrq->cmd->opcode, host->mrq);
+
+ return host->blk_page + host->offset;
+}
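A worked example makes the head/bounce arithmetic concrete (values assumed: PAGE_SIZE = 4096, 512-byte blocks):

    /* sg->offset = 3840:
     *   head     = 4096 - 3840 = 256    bytes left in the first page
     *   blk_head = 256 % 512   = 256
     * head (256) < blksz (512): the first block straddles the page
     * boundary, so usdhi6_blk_bounce() assembles it:
     *   bounce_buf[0..255]   <- last 256 bytes of page 0
     *   bounce_buf[256..511] <- first 256 bytes of page 1
     *
     * sg->offset = 1024 instead:
     *   head = 3072, blk_head = 0 - no bounce, blocks are transferred
     *   straight from the kmap()ed page.
     */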
+
+/* Unmap the current page: common for multiple and single block IO */
+static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct page *page = host->head_pg.page;
+
+ if (page) {
+ /* Previous block was cross-page boundary */
+ struct scatterlist *sg = data->sg_len > 1 ?
+ host->sg : data->sg;
+ size_t blk_head = host->head_len;
+
+ if (!data->error && data->flags & MMC_DATA_READ) {
+ memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
+ host->bounce_buf, blk_head);
+ memcpy(host->pg.mapped, host->bounce_buf + blk_head,
+ data->blksz - blk_head);
+ }
+
+ flush_dcache_page(page);
+ kunmap(page);
+
+ host->head_pg.page = NULL;
+
+ if (!force && sg_dma_len(sg) + sg->offset >
+ (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
+ /* More blocks in this SG, don't unmap the next page */
+ return;
+ }
+
+ page = host->pg.page;
+ if (!page)
+ return;
+
+ flush_dcache_page(page);
+ kunmap(page);
+
+ host->pg.page = NULL;
+}
+
+/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
+static void usdhi6_sg_advance(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ size_t done, total;
+
+ /* New offset: set at the end of the previous block */
+ if (host->head_pg.page) {
+ /* Finished a cross-page block, jump to the new page */
+ host->page_idx++;
+ host->offset = data->blksz - host->head_len;
+ host->blk_page = host->pg.mapped;
+ usdhi6_sg_unmap(host, false);
+ } else {
+ host->offset += data->blksz;
+ /* The completed block didn't cross a page boundary */
+ if (host->offset == PAGE_SIZE) {
+ /* If required, we'll map the page below */
+ host->offset = 0;
+ host->page_idx++;
+ }
+ }
+
+ /*
+ * Now host->blk_page + host->offset point at the end of our last block
+ * and host->page_idx is the index of the page, in which our new block
+ * is located, if any
+ */
+
+ done = (host->page_idx << PAGE_SHIFT) + host->offset;
+ total = host->sg->offset + sg_dma_len(host->sg);
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
+ done, total, host->offset);
+
+ if (done < total && host->offset) {
+ /* More blocks in this page */
+ if (host->offset + data->blksz > PAGE_SIZE)
+ /* We have reached a block that spans 2 pages */
+ usdhi6_blk_bounce(host, host->sg);
+
+ return;
+ }
+
+ /* Finished current page or an SG segment */
+ usdhi6_sg_unmap(host, false);
+
+ if (done == total) {
+ /*
+ * End of an SG segment or the complete SG: jump to the next
+ * segment, we'll map it later in usdhi6_blk_read() or
+ * usdhi6_blk_write()
+ */
+ struct scatterlist *next = sg_next(host->sg);
+
+ host->page_idx = 0;
+
+ if (!next)
+ host->wait = USDHI6_WAIT_FOR_DATA_END;
+ host->sg = next;
+
+ if (WARN(next && sg_dma_len(next) % data->blksz,
+ "SG size %u isn't a multiple of block size %u\n",
+ sg_dma_len(next), data->blksz))
+ data->error = -EINVAL;
+
+ return;
+ }
+
+ /* We cannot get here after crossing a page boundary */
+
+ /* Next page in the same SG */
+ host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
+ host->pg.mapped = kmap(host->pg.page);
+ host->blk_page = host->pg.mapped;
+
+ dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
+ host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
+ host->mrq->cmd->opcode, host->mrq);
+}
+
+/* DMA handling */
+
+static void usdhi6_dma_release(struct usdhi6_host *host)
+{
+ host->dma_active = false;
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+}
+
+static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ if (!host->dma_active)
+ return;
+
+ usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
+ host->dma_active = false;
+
+ if (data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev, data->sg,
+ data->sg_len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev, data->sg,
+ data->sg_len, DMA_TO_DEVICE);
+}
+
+static void usdhi6_dma_complete(void *arg)
+{
+ struct usdhi6_host *host = arg;
+ struct mmc_request *mrq = host->mrq;
+
+ if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
+ dev_name(mmc_dev(host->mmc)), mrq))
+ return;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
+ mrq->cmd->opcode);
+
+ usdhi6_dma_stop_unmap(host);
+ usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
+}
+
+static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
+ struct dma_async_tx_descriptor *desc = NULL;
+ dma_cookie_t cookie = -EINVAL;
+ enum dma_data_direction data_dir;
+ int ret;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ data_dir = DMA_TO_DEVICE;
+ break;
+ case DMA_DEV_TO_MEM:
+ data_dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
+ if (ret > 0) {
+ host->dma_active = true;
+ desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ desc->callback = usdhi6_dma_complete;
+ desc->callback_param = host;
+ cookie = dmaengine_submit(desc);
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
+ __func__, data->sg_len, ret, cookie, desc);
+
+ if (cookie < 0) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = cookie;
+ usdhi6_dma_release(host);
+ dev_warn(mmc_dev(host->mmc),
+ "DMA failed: %d, falling back to PIO\n", ret);
+ }
+
+ return cookie;
+}
+
+static int usdhi6_dma_start(struct usdhi6_host *host)
+{
+ if (!host->chan_rx || !host->chan_tx)
+ return -ENODEV;
+
+ if (host->mrq->data->flags & MMC_DATA_READ)
+ return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);
+
+ return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
+}
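usdhi6_dma_setup() follows the standard dmaengine slave sequence: map the SG list, prepare a slave descriptor, attach the completion callback, submit and validate the cookie. Nothing is transferred at that point; the hardware only starts when usdhi6_dma_kick() calls dma_async_issue_pending() after the command phase completes. A compressed sketch of the flow for the write direction (demo_* is hypothetical; error handling reduced to early returns and issue_pending pulled in directly):

    static int demo_dma_start(struct usdhi6_host *host, struct dma_chan *chan)
    {
            struct mmc_data *data = host->mrq->data;
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;
            int n;

            n = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
                           DMA_TO_DEVICE);
            if (!n)
                    return -EIO;

            desc = dmaengine_prep_slave_sg(chan, data->sg, n, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EIO;

            desc->callback = usdhi6_dma_complete;   /* completion callback */
            desc->callback_param = host;

            cookie = dmaengine_submit(desc);        /* queued, not running */
            if (dma_submit_error(cookie))
                    return cookie;

            dma_async_issue_pending(chan);          /* start the hardware */
            return 0;
    }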
+
+static void usdhi6_dma_kill(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
+ __func__, data->sg_len, data->blocks, data->blksz);
+ /* Abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+}
+
+static void usdhi6_dma_check_error(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
+ __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));
+
+ if (host->io_error) {
+ data->error = usdhi6_error_code(host);
+ data->bytes_xfered = 0;
+ usdhi6_dma_kill(host);
+ usdhi6_dma_release(host);
+ dev_warn(mmc_dev(host->mmc),
+ "DMA failed: %d, falling back to PIO\n", data->error);
+ return;
+ }
+
+ /*
+ * The datasheet tells us to check a response from the card, whereas
+ * responses only come after the command phase, not after the data
+ * phase. Let's check anyway.
+ */
+ if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
+ dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
+}
+
+static void usdhi6_dma_kick(struct usdhi6_host *host)
+{
+ if (host->mrq->data->flags & MMC_DATA_READ)
+ dma_async_issue_pending(host->chan_rx);
+ else
+ dma_async_issue_pending(host->chan_tx);
+}
+
+static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
+{
+ struct dma_slave_config cfg = {
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
+
+ if (!host->chan_tx)
+ return;
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = start + USDHI6_SD_BUF0;
+ cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(host->chan_tx, &cfg);
+ if (ret < 0)
+ goto e_release_tx;
+
+ host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
+
+ if (!host->chan_rx)
+ goto e_release_tx;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.src_addr = cfg.dst_addr;
+ cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
+ cfg.dst_addr = 0;
+ ret = dmaengine_slave_config(host->chan_rx, &cfg);
+ if (ret < 0)
+ goto e_release_rx;
+
+ return;
+
+e_release_rx:
+ dma_release_channel(host->chan_rx);
+ host->chan_rx = NULL;
+e_release_tx:
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+}
+
+/* API helpers */
+
+static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
+{
+ unsigned long rate = ios->clock;
+ u32 val;
+ unsigned int i;
+
+ for (i = 1000; i; i--) {
+ if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (!i) {
+ dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
+ return;
+ }
+
+ val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
+
+ if (rate) {
+ unsigned long new_rate;
+
+ if (host->imclk <= rate) {
+ if (ios->timing != MMC_TIMING_UHS_DDR50) {
+ /* Cannot have 1-to-1 clock in DDR mode */
+ new_rate = host->imclk;
+ val |= 0xff;
+ } else {
+ new_rate = host->imclk / 2;
+ }
+ } else {
+ unsigned long div =
+ roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
+ val |= div >> 2;
+ new_rate = host->imclk / div;
+ }
+
+ if (host->rate == new_rate)
+ return;
+
+ host->rate = new_rate;
+
+ dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
+ rate, (val & 0xff) << 2, new_rate);
+ }
+
+ /*
+ * If the old or the new rate is equal to the input rate, the clock
+ * has to be switched off before the change and back on afterwards
+ */
+ if (host->imclk == rate || host->imclk == host->rate || !rate)
+ usdhi6_write(host, USDHI6_SD_CLK_CTRL,
+ val & ~USDHI6_SD_CLK_CTRL_SCLKEN);
+
+ if (!rate) {
+ host->rate = 0;
+ return;
+ }
+
+ usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
+
+ if (host->imclk == rate || host->imclk == host->rate ||
+ !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
+ usdhi6_write(host, USDHI6_SD_CLK_CTRL,
+ val | USDHI6_SD_CLK_CTRL_SCLKEN);
+}
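The divider search rounds the clock ratio up to the next power of two, so the card clock never exceeds the requested rate, and the register field stores div >> 2. A worked example (input clock value assumed):

    /* imclk = 156 MHz, requested rate = 25 MHz:
     *   DIV_ROUND_UP(156000000, 25000000) = 7
     *   div      = roundup_pow_of_two(7)  = 8
     *   val     |= 8 >> 2                   (divider field = 2)
     *   new_rate = 156000000 / 8          = 19.5 MHz, <= the 25 MHz target
     */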
+
+static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
+static int usdhi6_reset(struct usdhi6_host *host)
+{
+ int i;
+
+ usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
+ cpu_relax();
+ usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
+ for (i = 1000; i; i--)
+ if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
+ break;
+
+ return i ? 0 : -ETIMEDOUT;
+}
+
+static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ u32 option, mode;
+ int ret;
+
+ dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
+ ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ usdhi6_set_power(host, ios);
+ usdhi6_only_cd(host);
+ break;
+ case MMC_POWER_UP:
+ /*
+ * Apart from this, USDHI6_SD_OPTION is only touched from
+ * .request(), which cannot race with MMC_POWER_UP
+ */
+ ret = usdhi6_reset(host);
+ if (ret < 0) {
+ dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
+ } else {
+ usdhi6_set_power(host, ios);
+ usdhi6_only_cd(host);
+ }
+ break;
+ case MMC_POWER_ON:
+ option = usdhi6_read(host, USDHI6_SD_OPTION);
+ /*
+ * The eMMC standard only allows 4 or 8 bits in the DDR mode;
+ * the same probably holds for SD cards. We check here anyway,
+ * since the datasheet explicitly requires 4 bits for DDR.
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_1) {
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ dev_err(mmc_dev(mmc),
+ "4 bits are required for DDR\n");
+ option |= USDHI6_SD_OPTION_WIDTH_1;
+ mode = 0;
+ } else {
+ option &= ~USDHI6_SD_OPTION_WIDTH_1;
+ mode = ios->timing == MMC_TIMING_UHS_DDR50;
+ }
+ usdhi6_write(host, USDHI6_SD_OPTION, option);
+ usdhi6_write(host, USDHI6_SDIF_MODE, mode);
+ break;
+ }
+
+ if (host->rate != ios->clock)
+ usdhi6_clk_set(host, ios);
+}
+
+/* This is data timeout. Response timeout is fixed to 640 clock cycles */
+static void usdhi6_timeout_set(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ u32 val;
+ unsigned long ticks;
+
+ if (!mrq->data)
+ ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
+ else
+ ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
+ mrq->data->timeout_clks;
+
+ if (!ticks || ticks > 1 << 27)
+ /* Max timeout */
+ val = 14;
+ else if (ticks < 1 << 13)
+ /* Min timeout */
+ val = 0;
+ else
+ val = order_base_2(ticks) - 13;
+
+ dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
+ mrq->data ? "data" : "cmd", ticks, host->rate);
+
+ /* Timeout Counter mask: 0xf0 */
+ usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
+ (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
+}
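The 4-bit field selects a power-of-two counter between 2^13 and 2^27 SD clock ticks, and the code picks the smallest window that is not shorter than the request. A worked example (request values assumed):

    /* rate = 25 MHz, data->timeout_ns = 100 ms, timeout_clks = 0:
     *   ticks = 25000000 / 1000000 * (100000000 / 1000)
     *         = 25 * 100000 = 2500000
     *   2^13 <= ticks <= 2^27, hence:
     *   val   = order_base_2(2500000) - 13 = 22 - 13 = 9
     * i.e. the counter expires after 2^(13 + 9) = 4194304 ticks, the
     * smallest power of two >= 2500000.
     */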
+
+static void usdhi6_request_done(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ if (WARN(host->pg.page || host->head_pg.page,
+ "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
+ host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
+ data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
+ data ? host->offset : 0, data ? data->blocks : 0,
+ data ? data->blksz : 0, data ? data->sg_len : 0))
+ usdhi6_sg_unmap(host, true);
+
+ if (mrq->cmd->error ||
+ (data && data->error) ||
+ (mrq->stop && mrq->stop->error))
+ dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
+ __func__, mrq->cmd->opcode, data ? data->blocks : 0,
+ data ? data->blksz : 0,
+ mrq->cmd->error,
+ data ? data->error : 1,
+ mrq->stop ? mrq->stop->error : 1);
+
+ /* Disable DMA */
+ usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ host->mrq = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int usdhi6_cmd_flags(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ u16 opc = cmd->opcode;
+
+ if (host->app_cmd) {
+ host->app_cmd = false;
+ opc |= USDHI6_SD_CMD_APP;
+ }
+
+ if (mrq->data) {
+ opc |= USDHI6_SD_CMD_DATA;
+
+ if (mrq->data->flags & MMC_DATA_READ)
+ opc |= USDHI6_SD_CMD_READ;
+
+ if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ mrq->data->blocks > 1)) {
+ opc |= USDHI6_SD_CMD_MULTI;
+ if (!mrq->stop)
+ opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
+ }
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R1;
+ break;
+ case MMC_RSP_R1B:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R3;
+ break;
+ default:
+ dev_warn(mmc_dev(host->mmc),
+ "Unknown response type %d\n",
+ mmc_resp_type(cmd));
+ return -EINVAL;
+ }
+ }
+
+ return opc;
+}
+
+static int usdhi6_rq_start(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ int opc = usdhi6_cmd_flags(host);
+ int i;
+
+ if (opc < 0)
+ return opc;
+
+ for (i = 1000; i; i--) {
+ if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (!i) {
+ dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
+ return -EAGAIN;
+ }
+
+ if (data) {
+ bool use_dma;
+ int ret = 0;
+
+ host->page_idx = 0;
+
+ if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
+ switch (data->blksz) {
+ case 512:
+ break;
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ if (mrq->stop)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
+ data->blksz != 512) {
+ ret = -EINVAL;
+ }
+
+ if (ret < 0) {
+ dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
+ __func__, data->blocks, data->blksz);
+ return -EINVAL;
+ }
+
+ if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ data->blocks > 1))
+ usdhi6_sg_prep(host);
+
+ usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
+
+ if ((data->blksz >= USDHI6_MIN_DMA ||
+ data->blocks > 1) &&
+ (data->blksz % 4 ||
+ data->sg->offset % 4))
+ dev_dbg(mmc_dev(host->mmc),
+ "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
+ data->blksz, data->blocks, data->sg->offset);
+
+ /* Enable DMA for USDHI6_MIN_DMA bytes or more */
+ use_dma = data->blksz >= USDHI6_MIN_DMA &&
+ !(data->blksz % 4) &&
+ usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
+
+ if (use_dma)
+ usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
+ __func__, cmd->opcode, data->blocks, data->blksz,
+ data->sg_len, use_dma ? "DMA" : "PIO",
+ data->flags & MMC_DATA_READ ? "read" : "write",
+ data->sg->offset, mrq->stop ? " + stop" : "");
+ } else {
+ dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
+ __func__, cmd->opcode);
+ }
+
+ /* We have to get a command completion interrupt with DMA too */
+ usdhi6_wait_for_resp(host);
+
+ host->wait = USDHI6_WAIT_FOR_CMD;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+
+ /* SEC bit is required to enable block counting by the core */
+ usdhi6_write(host, USDHI6_SD_STOP,
+ data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
+ usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
+
+ /* Kick command execution */
+ usdhi6_write(host, USDHI6_SD_CMD, opc);
+
+ return 0;
+}
+
+static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ int ret;
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ host->mrq = mrq;
+ host->sg = NULL;
+
+ usdhi6_timeout_set(host);
+ ret = usdhi6_rq_start(host);
+ if (ret < 0) {
+ mrq->cmd->error = ret;
+ usdhi6_request_done(host);
+ }
+}
+
+static int usdhi6_get_cd(struct mmc_host *mmc)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ /* Read is atomic, no need to lock */
+ u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
+
+/*
+ * level   status.CD   CD_ACTIVE_HIGH   card present
+ *   1         0             0               0
+ *   1         0             1               1
+ *   0         1             0               1
+ *   0         1             1               0
+ */
+ return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+}
+
+static int usdhi6_get_ro(struct mmc_host *mmc)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ /* No locking as above */
+ u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
+
+/*
+ * level   status.WP   RO_ACTIVE_HIGH   card read-only
+ *   1         0             0               0
+ *   1         0             1               1
+ *   0         1             0               1
+ *   0         1             1               0
+ */
+ return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+}
+
+static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+
+ dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");
+
+ if (enable) {
+ host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
+ usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
+ usdhi6_write(host, USDHI6_SDIO_MODE, 1);
+ } else {
+ usdhi6_write(host, USDHI6_SDIO_MODE, 0);
+ usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
+ host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
+ }
+}
+
+static struct mmc_host_ops usdhi6_ops = {
+ .request = usdhi6_request,
+ .set_ios = usdhi6_set_ios,
+ .get_cd = usdhi6_get_cd,
+ .get_ro = usdhi6_get_ro,
+ .enable_sdio_irq = usdhi6_enable_sdio_irq,
+};
+
+/* State machine handlers */
+
+static void usdhi6_resp_cmd12(struct usdhi6_host *host)
+{
+ struct mmc_command *cmd = host->mrq->stop;
+ cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
+}
+
+static void usdhi6_resp_read(struct usdhi6_host *host)
+{
+ struct mmc_command *cmd = host->mrq->cmd;
+ u32 *rsp = cmd->resp, tmp = 0;
+ int i;
+
+/*
+ * RSP10 39-8
+ * RSP32 71-40
+ * RSP54 103-72
+ * RSP76 127-104
+ * R2-type response:
+ * resp[0] = r[127..96]
+ * resp[1] = r[95..64]
+ * resp[2] = r[63..32]
+ * resp[3] = r[31..0]
+ * Other responses:
+ * resp[0] = r[39..8]
+ */
+
+ if (mmc_resp_type(cmd) == MMC_RSP_NONE)
+ return;
+
+ if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
+ dev_err(mmc_dev(host->mmc),
+ "CMD%d: response expected but is missing!\n", cmd->opcode);
+ return;
+ }
+
+ if (mmc_resp_type(cmd) & MMC_RSP_136)
+ for (i = 0; i < 4; i++) {
+ if (i)
+ rsp[3 - i] = tmp >> 24;
+ tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
+ rsp[3 - i] |= tmp << 8;
+ }
+ else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+ /* Read RSP54 to avoid conflict with auto CMD12 */
+ rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
+ else
+ rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
+
+ dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
+}
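Since the hardware registers start at response bit 8 (the CRC byte is not stored), rebuilding the 128-bit R2 response means shifting each 32-bit read up by 8 and filling the freed low byte with the top byte of the previous register. Tracing the i == 1 iteration of the loop above:

    /* rsp[2]  = tmp >> 24;      tmp still holds RSP10 = r[39:8],
     *                           so this is the carry byte r[39:32]
     * tmp     = RSP32 = r[71:40];
     * rsp[2] |= tmp << 8;       adds r[63:40] in bits 31:8
     * => rsp[2] == r[63:32], matching the layout table above.
     */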
+
+static int usdhi6_blk_read(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p;
+ int i, rest;
+
+ if (host->io_error) {
+ data->error = usdhi6_error_code(host);
+ goto error;
+ }
+
+ if (host->pg.page) {
+ p = host->blk_page + host->offset;
+ } else {
+ p = usdhi6_sg_map(host);
+ if (!p) {
+ data->error = -ENOMEM;
+ goto error;
+ }
+ }
+
+ for (i = 0; i < data->blksz / 4; i++, p++)
+ *p = usdhi6_read(host, USDHI6_SD_BUF0);
+
+ rest = data->blksz % 4;
+ for (i = 0; i < (rest + 1) / 2; i++) {
+ u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
+ ((u8 *)p)[2 * i] = ((u8 *)&d)[0];
+ if (rest > 1 && !i)
+ ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
+ }
+
+ return 0;
+
+error:
+ dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ return data->error;
+}
+
+static int usdhi6_blk_write(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p;
+ int i, rest;
+
+ if (host->io_error) {
+ data->error = usdhi6_error_code(host);
+ goto error;
+ }
+
+ if (host->pg.page) {
+ p = host->blk_page + host->offset;
+ } else {
+ p = usdhi6_sg_map(host);
+ if (!p) {
+ data->error = -ENOMEM;
+ goto error;
+ }
+ }
+
+ for (i = 0; i < data->blksz / 4; i++, p++)
+ usdhi6_write(host, USDHI6_SD_BUF0, *p);
+
+ rest = data->blksz % 4;
+ for (i = 0; i < (rest + 1) / 2; i++) {
+ u16 d;
+ ((u8 *)&d)[0] = ((u8 *)p)[2 * i];
+ if (rest > 1 && !i)
+ ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
+ else
+ ((u8 *)&d)[1] = 0;
+ usdhi6_write16(host, USDHI6_SD_BUF0, d);
+ }
+
+ return 0;
+
+error:
+ dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ return data->error;
+}
+
+static int usdhi6_stop_cmd(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+
+ switch (mrq->cmd->opcode) {
+ case MMC_READ_MULTIPLE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
+ host->wait = USDHI6_WAIT_FOR_STOP;
+ return 0;
+ }
+ /* Unsupported STOP command */
+ default:
+ dev_err(mmc_dev(host->mmc),
+ "unsupported stop CMD%d for CMD%d\n",
+ mrq->stop->opcode, mrq->cmd->opcode);
+ mrq->stop->error = -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool usdhi6_end_cmd(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (host->io_error) {
+ cmd->error = usdhi6_error_code(host);
+ return false;
+ }
+
+ usdhi6_resp_read(host);
+
+ if (!mrq->data)
+ return false;
+
+ if (host->dma_active) {
+ usdhi6_dma_kick(host);
+ if (!mrq->stop)
+ host->wait = USDHI6_WAIT_FOR_DMA;
+ else if (usdhi6_stop_cmd(host) < 0)
+ return false;
+ } else if (mrq->data->flags & MMC_DATA_READ) {
+ if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ mrq->data->blocks > 1))
+ host->wait = USDHI6_WAIT_FOR_MREAD;
+ else
+ host->wait = USDHI6_WAIT_FOR_READ;
+ } else {
+ if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ mrq->data->blocks > 1))
+ host->wait = USDHI6_WAIT_FOR_MWRITE;
+ else
+ host->wait = USDHI6_WAIT_FOR_WRITE;
+ }
+
+ return true;
+}
+
+static bool usdhi6_read_block(struct usdhi6_host *host)
+{
+ /* ACCESS_END IRQ is already unmasked */
+ int ret = usdhi6_blk_read(host);
+
+ /*
+ * We have to force unmapping of both pages: the single block could
+ * have crossed a page boundary, in which case host->page_idx stays 0
+ * for single-block IO, so without forcing the second page would
+ * never be unmapped.
+ */
+ usdhi6_sg_unmap(host, true);
+
+ if (ret < 0)
+ return false;
+
+ host->wait = USDHI6_WAIT_FOR_DATA_END;
+ return true;
+}
+
+static bool usdhi6_mread_block(struct usdhi6_host *host)
+{
+ int ret = usdhi6_blk_read(host);
+
+ if (ret < 0)
+ return false;
+
+ usdhi6_sg_advance(host);
+
+ return !host->mrq->data->error &&
+ (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
+}
+
+static bool usdhi6_write_block(struct usdhi6_host *host)
+{
+ int ret = usdhi6_blk_write(host);
+
+ /* See comment in usdhi6_read_block() */
+ usdhi6_sg_unmap(host, true);
+
+ if (ret < 0)
+ return false;
+
+ host->wait = USDHI6_WAIT_FOR_DATA_END;
+ return true;
+}
+
+static bool usdhi6_mwrite_block(struct usdhi6_host *host)
+{
+ int ret = usdhi6_blk_write(host);
+
+ if (ret < 0)
+ return false;
+
+ usdhi6_sg_advance(host);
+
+ return !host->mrq->data->error &&
+ (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
+}
+
+/* Interrupt & timeout handlers */
+
+static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ bool io_wait = false;
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ mrq = host->mrq;
+ if (!mrq)
+ return IRQ_HANDLED;
+
+ cmd = mrq->cmd;
+ data = mrq->data;
+
+ switch (host->wait) {
+ case USDHI6_WAIT_FOR_REQUEST:
+ /* We're too late, the timeout has already kicked in */
+ return IRQ_HANDLED;
+ case USDHI6_WAIT_FOR_CMD:
+ /* Wait for data? */
+ io_wait = usdhi6_end_cmd(host);
+ break;
+ case USDHI6_WAIT_FOR_MREAD:
+ /* Wait for more data? */
+ io_wait = usdhi6_mread_block(host);
+ break;
+ case USDHI6_WAIT_FOR_READ:
+ /* Wait for data end? */
+ io_wait = usdhi6_read_block(host);
+ break;
+ case USDHI6_WAIT_FOR_MWRITE:
+ /* Wait for more data to write? */
+ io_wait = usdhi6_mwrite_block(host);
+ break;
+ case USDHI6_WAIT_FOR_WRITE:
+ /* Wait for data end? */
+ io_wait = usdhi6_write_block(host);
+ break;
+ case USDHI6_WAIT_FOR_DMA:
+ usdhi6_dma_check_error(host);
+ break;
+ case USDHI6_WAIT_FOR_STOP:
+ usdhi6_write(host, USDHI6_SD_STOP, 0);
+ if (host->io_error) {
+ int ret = usdhi6_error_code(host);
+ if (mrq->stop)
+ mrq->stop->error = ret;
+ else
+ mrq->data->error = ret;
+ dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
+ break;
+ }
+ usdhi6_resp_cmd12(host);
+ mrq->stop->error = 0;
+ break;
+ case USDHI6_WAIT_FOR_DATA_END:
+ if (host->io_error) {
+ mrq->data->error = usdhi6_error_code(host);
+ dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
+ mrq->data->error);
+ }
+ break;
+ default:
+ cmd->error = -EFAULT;
+ dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
+ usdhi6_request_done(host);
+ return IRQ_HANDLED;
+ }
+
+ if (io_wait) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ /* Wait for more data or ACCESS_END */
+ if (!host->dma_active)
+ usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
+ return IRQ_HANDLED;
+ }
+
+ if (!cmd->error) {
+ if (data) {
+ if (!data->error) {
+ if (host->wait != USDHI6_WAIT_FOR_STOP &&
+ host->mrq->stop &&
+ !host->mrq->stop->error &&
+ !usdhi6_stop_cmd(host)) {
+ /* Sending STOP */
+ usdhi6_wait_for_resp(host);
+
+ schedule_delayed_work(&host->timeout_work,
+ host->timeout);
+
+ return IRQ_HANDLED;
+ }
+
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ /* Data error: might need to unmap the last page */
+ dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
+ __func__, data->error);
+ usdhi6_sg_unmap(host, true);
+ }
+ } else if (cmd->opcode == MMC_APP_CMD) {
+ host->app_cmd = true;
+ }
+ }
+
+ usdhi6_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t usdhi6_sd(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ u16 status, status2, error;
+
+ status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
+ ~USDHI6_SD_INFO1_CARD;
+ status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
+
+ usdhi6_only_cd(host);
+
+ dev_dbg(mmc_dev(host->mmc),
+ "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);
+
+ if (!status && !status2)
+ return IRQ_NONE;
+
+ error = status2 & USDHI6_SD_INFO2_ERR;
+
+ /* Ack / clear interrupts */
+ if (USDHI6_SD_INFO1_IRQ & status)
+ usdhi6_write(host, USDHI6_SD_INFO1,
+ 0xffff & ~(USDHI6_SD_INFO1_IRQ & status));
+
+ if (USDHI6_SD_INFO2_IRQ & status2) {
+ if (error)
+ /* In error cases BWE and BRE aren't cleared automatically */
+ status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;
+
+ usdhi6_write(host, USDHI6_SD_INFO2,
+ 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
+ }
+
+ host->io_error = error;
+ host->irq_status = status;
+
+ if (error) {
+ /* Don't pollute the log with unsupported command timeouts */
+ if (host->wait != USDHI6_WAIT_FOR_CMD ||
+ error != USDHI6_SD_INFO2_RSP_TOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "%s(): INFO2 error bits 0x%08x\n",
+ __func__, error);
+ else
+ dev_dbg(mmc_dev(host->mmc),
+ "%s(): INFO2 error bits 0x%08x\n",
+ __func__, error);
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
+
+ mmc_signal_sdio_irq(host->mmc);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t usdhi6_cd(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ struct mmc_host *mmc = host->mmc;
+ u16 status;
+
+ /* We're only interested in hotplug events here */
+ status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
+ USDHI6_SD_INFO1_CARD;
+
+ if (!status)
+ return IRQ_NONE;
+
+ /* Ack */
+ usdhi6_write(host, USDHI6_SD_INFO1, !status);
+
+ if (!work_pending(&mmc->detect.work) &&
+ (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
+ !mmc->card) ||
+ ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
+ mmc->card)))
+ mmc_detect_change(mmc, msecs_to_jiffies(100));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Actually this should not be needed, if the built-in timeout works reliably
+ * in both PIO cases and DMA never fails. But if DMA does fail, a timeout
+ * handler might be the only way to catch the error.
+ */
+static void usdhi6_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq ? mrq->data : NULL;
+
+ dev_warn(mmc_dev(host->mmc),
+ "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
+ host->dma_active ? "DMA" : "PIO",
+ host->wait, mrq ? mrq->cmd->opcode : -1,
+ usdhi6_read(host, USDHI6_SD_INFO1),
+ usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
+
+ if (host->dma_active) {
+ usdhi6_dma_kill(host);
+ usdhi6_dma_stop_unmap(host);
+ }
+
+ switch (host->wait) {
+ default:
+ dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
+ /* mrq can be NULL in this actually impossible case */
+ case USDHI6_WAIT_FOR_CMD:
+ usdhi6_error_code(host);
+ if (mrq)
+ mrq->cmd->error = -ETIMEDOUT;
+ break;
+ case USDHI6_WAIT_FOR_STOP:
+ usdhi6_error_code(host);
+ mrq->stop->error = -ETIMEDOUT;
+ break;
+ case USDHI6_WAIT_FOR_DMA:
+ case USDHI6_WAIT_FOR_MREAD:
+ case USDHI6_WAIT_FOR_MWRITE:
+ case USDHI6_WAIT_FOR_READ:
+ case USDHI6_WAIT_FOR_WRITE:
+ dev_dbg(mmc_dev(host->mmc),
+ "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
+ data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
+ host->offset, data->blocks, data->blksz, data->sg_len,
+ sg_dma_len(host->sg), host->sg->offset);
+ usdhi6_sg_unmap(host, true);
+ /*
+ * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
+ * the page
+ */
+ case USDHI6_WAIT_FOR_DATA_END:
+ usdhi6_error_code(host);
+ data->error = -ETIMEDOUT;
+ }
+
+ if (mrq)
+ usdhi6_request_done(host);
+}
+
+/* Probe / release */
+
+static const struct of_device_id usdhi6_of_match[] = {
+ {.compatible = "renesas,usdhi6rol0"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, usdhi6_of_match);
+
+static int usdhi6_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mmc_host *mmc;
+ struct usdhi6_host *host;
+ struct resource *res;
+ int irq_cd, irq_sd, irq_sdio;
+ u32 version;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ irq_cd = platform_get_irq_byname(pdev, "card detect");
+ irq_sd = platform_get_irq_byname(pdev, "data");
+ irq_sdio = platform_get_irq_byname(pdev, "SDIO");
+ if (irq_sd < 0 || irq_sdio < 0)
+ return -ENODEV;
+
+ mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = mmc_of_parse(mmc);
+ if (ret < 0)
+ goto e_free_mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ host->timeout = msecs_to_jiffies(4000);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto e_free_mmc;
+ }
+
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk))
+ goto e_free_mmc;
+
+ host->imclk = clk_get_rate(host->clk);
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret < 0)
+ goto e_free_mmc;
+
+	version = usdhi6_read(host, USDHI6_VERSION);
+	if ((version & 0xfff) != 0xa0d) {
+		ret = -ENODEV;
+		dev_err(dev, "Version not recognized %x\n", version);
+		goto e_clk_off;
+	}
+
+ dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
+ usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
+
+ usdhi6_mask_all(host);
+
+ if (irq_cd >= 0) {
+ ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
+ dev_name(dev), host);
+ if (ret < 0)
+ goto e_clk_off;
+ } else {
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
+ dev_name(dev), host);
+ if (ret < 0)
+ goto e_clk_off;
+
+ ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
+ dev_name(dev), host);
+ if (ret < 0)
+ goto e_clk_off;
+
+ INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
+
+ usdhi6_dma_request(host, res->start);
+
+ mmc->ops = &usdhi6_ops;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ;
+	/* .max_segs is an arbitrary choice; feel free to adjust. */
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
+	/*
+	 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
+	 * but larger segments make DMA more efficient. We could check whether
+	 * we managed to get DMA and fall back to 1-page segments otherwise,
+	 * but if DMA is obtained and then fails at run-time, we fall back to
+	 * PIO while still seeing large segments. So we could not get rid of
+	 * the page-mapping code anyway.
+	 */
+ mmc->max_seg_size = mmc->max_req_size;
+ if (!mmc->f_max)
+ mmc->f_max = host->imclk;
+ mmc->f_min = host->imclk / 512;
+
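With the usual 4 KiB PAGE_CACHE_SIZE, the limits above work out to max_req_size = 32 * 4 KiB = 128 KiB per request, max_blk_count = 128 KiB / 512 = 256 blocks, and a single segment allowed to cover the entire request.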
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto e_clk_off;
+
+ return 0;
+
+e_clk_off:
+ clk_disable_unprepare(host->clk);
+e_free_mmc:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int usdhi6_remove(struct platform_device *pdev)
+{
+ struct usdhi6_host *host = platform_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+
+ usdhi6_mask_all(host);
+ cancel_delayed_work_sync(&host->timeout_work);
+ usdhi6_dma_release(host);
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static struct platform_driver usdhi6_driver = {
+ .probe = usdhi6_probe,
+ .remove = usdhi6_remove,
+ .driver = {
+ .name = "usdhi6rol0",
+ .owner = THIS_MODULE,
+ .of_match_table = usdhi6_of_match,
+ },
+};
+
+module_platform_driver(usdhi6_driver);
+
+MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:usdhi6rol0");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 498d1f799085..282891a8e451 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -840,7 +840,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
mmc->max_blk_count * 16,
&priv->dma_desc_device_addr,
- 208);
+ GFP_KERNEL);
if (!priv->dma_desc_buffer) {
dev_err(&pdev->dev, "DMA alloc fail\n");
ret = -EPERM;
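
The value 208 was being passed where dma_alloc_coherent() expects a gfp_t allocation mask, so the call site now passes GFP_KERNEL. For reference, a minimal correct use of the API; the buffer name and size here are illustrative only:

#include <linux/dma-mapping.h>

dma_addr_t desc_dma;
void *desc;

/* Illustrative size; the last argument must be a gfp_t mask. */
desc = dma_alloc_coherent(dev, 4096, &desc_dma, GFP_KERNEL);
if (!desc)
	return -ENOMEM;
/* ... hand desc_dma to the device, access desc from the CPU ... */
dma_free_coherent(dev, 4096, desc, desc_dma);
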
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5d49a2129618..94b821042d9d 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -321,6 +321,8 @@ source "drivers/mtd/onenand/Kconfig"
source "drivers/mtd/lpddr/Kconfig"
+source "drivers/mtd/spi-nor/Kconfig"
+
source "drivers/mtd/ubi/Kconfig"
endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4cfb31e6c966..99bb9a1f6e16 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -32,4 +32,5 @@ inftl-objs := inftlcore.o inftlmount.o
obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index e4696b37f3de..9f02c28c0204 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -169,33 +169,33 @@ config MTD_OTP
in the programming of OTP bits will waste them.
config MTD_CFI_INTELEXT
- tristate "Support for Intel/Sharp flash chips"
+ tristate "Support for CFI command set 0001 (Intel/Sharp chips)"
depends on MTD_GEN_PROBE
select MTD_CFI_UTIL
help
The Common Flash Interface defines a number of different command
sets which a CFI-compliant chip may claim to implement. This code
- provides support for one of those command sets, used on Intel
- StrataFlash and other parts.
+ provides support for command set 0001, used on Intel StrataFlash
+ and other parts.
config MTD_CFI_AMDSTD
- tristate "Support for AMD/Fujitsu/Spansion flash chips"
+ tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"
depends on MTD_GEN_PROBE
select MTD_CFI_UTIL
help
The Common Flash Interface defines a number of different command
sets which a CFI-compliant chip may claim to implement. This code
- provides support for one of those command sets, used on chips
- including the AMD Am29LV320.
+ provides support for command set 0002, used on chips including
+ the AMD Am29LV320.
config MTD_CFI_STAA
- tristate "Support for ST (Advanced Architecture) flash chips"
+ tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"
depends on MTD_GEN_PROBE
select MTD_CFI_UTIL
help
The Common Flash Interface defines a number of different command
sets which a CFI-compliant chip may claim to implement. This code
- provides support for one of those command sets.
+ provides support for command set 0020.
config MTD_CFI_UTIL
tristate
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 6293855fb5ee..423666b51efb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -961,7 +961,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
chipnum++;
if (chipnum >= cfi->numchips)
- break;
+ break;
}
}
@@ -1170,7 +1170,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
chipnum++;
if (chipnum >= cfi->numchips)
- break;
+ break;
}
}
return 0;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 08049f6eea60..09c79bd0b4f4 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -239,7 +239,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
chipnum++;
if (chipnum >= cfi->numchips)
- break;
+ break;
}
}
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 1210bc2923b7..c49d0b127fef 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -80,7 +80,7 @@ config MTD_DATAFLASH_OTP
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
- depends on SPI_MASTER
+ depends on SPI_MASTER && MTD_SPI_NOR
help
This enables access to most modern SPI flash chips, used for
program and data storage. Series supported include Atmel AT26DF,
@@ -212,7 +212,7 @@ config MTD_DOCG3
config MTD_ST_SPI_FSM
tristate "ST Microelectronics SPI FSM Serial Flash Controller"
- depends on ARM || SH
+ depends on ARCH_STI
help
This provides an MTD device driver for the ST Microelectronics
SPI Fast Sequence Mode (FSM) Serial Flash Controller and support
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index dd5e1018d37b..91a169c44b39 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1608,8 +1608,8 @@ static ssize_t dps1_insert_key(struct device *dev,
#define FLOOR_SYSFS(id) { \
__ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
__ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
- __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
- __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUSR|S_IWGRP, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUSR|S_IWGRP, NULL, dps1_insert_key), \
}
static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
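
S_IWUGO includes S_IWOTH, so the old mode made the protection-key files world-writable; the replacement restricts writes to owner and group. A comparable declaration for a single write-only attribute might look like this (the attribute name and store handler are hypothetical):

/* Hypothetical write-only attribute: no show method, no world write bit. */
static DEVICE_ATTR(protection_key, S_IWUSR | S_IWGRP, NULL, protection_key_store);
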
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 1fd4a0f77967..7df86948e6d4 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -213,6 +213,28 @@ static void elm_load_syndrome(struct elm_info *info,
val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
elm_write_reg(info, offset, val);
break;
+ case BCH16_ECC:
+ val = cpu_to_be32(*(u32 *) &ecc[22]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[18]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[14]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[10]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[6]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[2]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+ elm_write_reg(info, offset, val);
+ break;
default:
pr_err("invalid config bch_type\n");
}
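
The new BCH16 branch packs the 26-byte syndrome into seven 32-bit fragment registers, starting with the highest-offset bytes; the last register carries only bytes 0-1 in its upper half. A loop-based equivalent of the unrolled stores above, assuming the same ecc[] layout:

int i;
u32 val;

for (i = 0; i < 6; i++) {
	val = cpu_to_be32(*(u32 *)&ecc[22 - 4 * i]);
	elm_write_reg(info, offset + 4 * i, val);
}
/* Bytes 0-1 end up in the top half of the seventh register. */
val = cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
elm_write_reg(info, offset + 24, val);

Note also that the BCH16_ECC cases added to elm_context_save() and elm_context_restore() below fall through into the BCH8_ECC case deliberately: each higher-order scheme saves its extra fragment registers and then reuses the lower-order cases for the remainder.
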
@@ -418,6 +440,7 @@ static int elm_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
/**
* elm_context_save
* saves ELM configurations to preserve them across Hardware powered-down
@@ -435,6 +458,13 @@ static int elm_context_save(struct elm_info *info)
for (i = 0; i < ERROR_VECTOR_MAX; i++) {
offset = i * SYNDROME_FRAGMENT_REG_SIZE;
switch (bch_type) {
+ case BCH16_ECC:
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_5 + offset);
+ regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_4 + offset);
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
@@ -473,6 +503,13 @@ static int elm_context_restore(struct elm_info *info)
for (i = 0; i < ERROR_VECTOR_MAX; i++) {
offset = i * SYNDROME_FRAGMENT_REG_SIZE;
switch (bch_type) {
+ case BCH16_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+ regs->elm_syndrome_fragment_5[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+ regs->elm_syndrome_fragment_4[i]);
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
@@ -509,6 +546,7 @@ static int elm_resume(struct device *dev)
elm_context_restore(info);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 524dab3ac938..ed7e0a1bed3c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -19,485 +19,98 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/math64.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mod_devicetable.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_platform.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
-/* Flash opcodes. */
-#define OPCODE_WREN 0x06 /* Write enable */
-#define OPCODE_RDSR 0x05 /* Read status register */
-#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
-#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
-#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
-#define OPCODE_DUAL_READ 0x3b /* Read data bytes (Dual SPI) */
-#define OPCODE_QUAD_READ 0x6b /* Read data bytes (Quad SPI) */
-#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
-#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
-#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
-#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
-#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
-#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
-#define OPCODE_RDID 0x9f /* Read JEDEC ID */
-#define OPCODE_RDCR 0x35 /* Read configuration register */
-
-/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
-#define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */
-#define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */
-#define OPCODE_DUAL_READ_4B 0x3c /* Read data bytes (Dual SPI) */
-#define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes (Quad SPI) */
-#define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */
-#define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */
-
-/* Used for SST flashes only. */
-#define OPCODE_BP 0x02 /* Byte program */
-#define OPCODE_WRDI 0x04 /* Write disable */
-#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
-
-/* Used for Macronix and Winbond flashes. */
-#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
-#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
-
-/* Used for Spansion flashes only. */
-#define OPCODE_BRWR 0x17 /* Bank register write */
-
-/* Status Register bits. */
-#define SR_WIP 1 /* Write in progress */
-#define SR_WEL 2 /* Write enable latch */
-/* meaning of other SR_* bits may differ between vendors */
-#define SR_BP0 4 /* Block protect 0 */
-#define SR_BP1 8 /* Block protect 1 */
-#define SR_BP2 0x10 /* Block protect 2 */
-#define SR_SRWD 0x80 /* SR write protect */
-
-#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
-
-/* Configuration Register bits. */
-#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
-
-/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define MAX_CMD_SIZE 6
-
-#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
-
-/****************************************************************************/
-
-enum read_type {
- M25P80_NORMAL = 0,
- M25P80_FAST,
- M25P80_DUAL,
- M25P80_QUAD,
-};
-
struct m25p {
struct spi_device *spi;
- struct mutex lock;
+ struct spi_nor spi_nor;
struct mtd_info mtd;
- u16 page_size;
- u16 addr_width;
- u8 erase_opcode;
- u8 read_opcode;
- u8 program_opcode;
- u8 *command;
- enum read_type flash_read;
+ u8 command[MAX_CMD_SIZE];
};
-static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
-{
- return container_of(mtd, struct m25p, mtd);
-}
-
-/****************************************************************************/
-
-/*
- * Internal helper functions
- */
-
-/*
- * Read the status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
- */
-static int read_sr(struct m25p *flash)
-{
- ssize_t retval;
- u8 code = OPCODE_RDSR;
- u8 val;
-
- retval = spi_write_then_read(flash->spi, &code, 1, &val, 1);
-
- if (retval < 0) {
- dev_err(&flash->spi->dev, "error %d reading SR\n",
- (int) retval);
- return retval;
- }
-
- return val;
-}
-
-/*
- * Read configuration register, returning its value in the
- * location. Return the configuration register value.
- * Returns negative if error occured.
- */
-static int read_cr(struct m25p *flash)
-{
- u8 code = OPCODE_RDCR;
- int ret;
- u8 val;
-
- ret = spi_write_then_read(flash->spi, &code, 1, &val, 1);
- if (ret < 0) {
- dev_err(&flash->spi->dev, "error %d reading CR\n", ret);
- return ret;
- }
-
- return val;
-}
-
-/*
- * Write status register 1 byte
- * Returns negative if error occurred.
- */
-static int write_sr(struct m25p *flash, u8 val)
-{
- flash->command[0] = OPCODE_WRSR;
- flash->command[1] = val;
-
- return spi_write(flash->spi, flash->command, 2);
-}
-
-/*
- * Set write enable latch with Write Enable command.
- * Returns negative if error occurred.
- */
-static inline int write_enable(struct m25p *flash)
-{
- u8 code = OPCODE_WREN;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
-}
-
-/*
- * Send write disble instruction to the chip.
- */
-static inline int write_disable(struct m25p *flash)
-{
- u8 code = OPCODE_WRDI;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
-}
-
-/*
- * Enable/disable 4-byte addressing mode.
- */
-static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
-{
- int status;
- bool need_wren = false;
-
- switch (JEDEC_MFR(jedec_id)) {
- case CFI_MFR_ST: /* Micron, actually */
- /* Some Micron need WREN command; all will accept it */
- need_wren = true;
- case CFI_MFR_MACRONIX:
- case 0xEF /* winbond */:
- if (need_wren)
- write_enable(flash);
-
- flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
- status = spi_write(flash->spi, flash->command, 1);
-
- if (need_wren)
- write_disable(flash);
-
- return status;
- default:
- /* Spansion style */
- flash->command[0] = OPCODE_BRWR;
- flash->command[1] = enable << 7;
- return spi_write(flash->spi, flash->command, 2);
- }
-}
-
-/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
- */
-static int wait_till_ready(struct m25p *flash)
-{
- unsigned long deadline;
- int sr;
-
- deadline = jiffies + MAX_READY_WAIT_JIFFIES;
-
- do {
- if ((sr = read_sr(flash)) < 0)
- break;
- else if (!(sr & SR_WIP))
- return 0;
-
- cond_resched();
-
- } while (!time_after_eq(jiffies, deadline));
-
- return 1;
-}
-
-/*
- * Write status Register and configuration register with 2 bytes
- * The first byte will be written to the status register, while the
- * second byte will be written to the configuration register.
- * Return negative if error occured.
- */
-static int write_sr_cr(struct m25p *flash, u16 val)
-{
- flash->command[0] = OPCODE_WRSR;
- flash->command[1] = val & 0xff;
- flash->command[2] = (val >> 8);
-
- return spi_write(flash->spi, flash->command, 3);
-}
-
-static int macronix_quad_enable(struct m25p *flash)
-{
- int ret, val;
- u8 cmd[2];
- cmd[0] = OPCODE_WRSR;
-
- val = read_sr(flash);
- cmd[1] = val | SR_QUAD_EN_MX;
- write_enable(flash);
-
- spi_write(flash->spi, &cmd, 2);
-
- if (wait_till_ready(flash))
- return 1;
-
- ret = read_sr(flash);
- if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
- dev_err(&flash->spi->dev, "Macronix Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int spansion_quad_enable(struct m25p *flash)
+static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
{
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
int ret;
- int quad_en = CR_QUAD_EN_SPAN << 8;
-
- write_enable(flash);
- ret = write_sr_cr(flash, quad_en);
- if (ret < 0) {
- dev_err(&flash->spi->dev,
- "error while writing configuration register\n");
- return -EINVAL;
- }
-
- /* read back and check it */
- ret = read_cr(flash);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(&flash->spi->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int set_quad_mode(struct m25p *flash, u32 jedec_id)
-{
- int status;
-
- switch (JEDEC_MFR(jedec_id)) {
- case CFI_MFR_MACRONIX:
- status = macronix_quad_enable(flash);
- if (status) {
- dev_err(&flash->spi->dev,
- "Macronix quad-read not enabled\n");
- return -EINVAL;
- }
- return status;
- default:
- status = spansion_quad_enable(flash);
- if (status) {
- dev_err(&flash->spi->dev,
- "Spansion quad-read not enabled\n");
- return -EINVAL;
- }
- return status;
- }
-}
-
-/*
- * Erase the whole flash memory
- *
- * Returns 0 if successful, non-zero otherwise.
- */
-static int erase_chip(struct m25p *flash)
-{
- pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__,
- (long long)(flash->mtd.size >> 10));
+ ret = spi_write_then_read(spi, &code, 1, val, len);
+ if (ret < 0)
+ dev_err(&spi->dev, "error %d reading %x\n", ret, code);
- /* Wait until finished previous write command. */
- if (wait_till_ready(flash))
- return 1;
-
- /* Send write enable, then erase commands. */
- write_enable(flash);
-
- /* Set up command buffer. */
- flash->command[0] = OPCODE_CHIP_ERASE;
-
- spi_write(flash->spi, flash->command, 1);
-
- return 0;
+ return ret;
}
-static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
+static void m25p_addr2cmd(struct spi_nor *nor, unsigned int addr, u8 *cmd)
{
/* opcode is in cmd[0] */
- cmd[1] = addr >> (flash->addr_width * 8 - 8);
- cmd[2] = addr >> (flash->addr_width * 8 - 16);
- cmd[3] = addr >> (flash->addr_width * 8 - 24);
- cmd[4] = addr >> (flash->addr_width * 8 - 32);
+ cmd[1] = addr >> (nor->addr_width * 8 - 8);
+ cmd[2] = addr >> (nor->addr_width * 8 - 16);
+ cmd[3] = addr >> (nor->addr_width * 8 - 24);
+ cmd[4] = addr >> (nor->addr_width * 8 - 32);
}
-static int m25p_cmdsz(struct m25p *flash)
+static int m25p_cmdsz(struct spi_nor *nor)
{
- return 1 + flash->addr_width;
+ return 1 + nor->addr_width;
}
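
As a worked example: with nor->addr_width == 3 and addr == 0x123456, m25p_addr2cmd() yields cmd[1] = 0x12, cmd[2] = 0x34 and cmd[3] = 0x56 (the address goes out big-endian after the opcode), and m25p_cmdsz() reports 4 bytes; cmd[4] only carries meaningful bits when addr_width is 4.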
-/*
- * Erase one sector of flash memory at offset ``offset'' which is any
- * address within the sector which should be erased.
- *
- * Returns 0 if successful, non-zero otherwise.
- */
-static int erase_sector(struct m25p *flash, u32 offset)
+static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int wr_en)
{
- pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev),
- __func__, flash->mtd.erasesize / 1024, offset);
-
- /* Wait until finished previous write command. */
- if (wait_till_ready(flash))
- return 1;
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
- /* Send write enable, then erase commands. */
- write_enable(flash);
-
- /* Set up command buffer. */
- flash->command[0] = flash->erase_opcode;
- m25p_addr2cmd(flash, offset, flash->command);
-
- spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
+ flash->command[0] = opcode;
+ if (buf)
+ memcpy(&flash->command[1], buf, len);
- return 0;
+ return spi_write(spi, flash->command, len + 1);
}
-/****************************************************************************/
-
-/*
- * MTD implementation
- */
-
-/*
- * Erase an address range on the flash chip. The address range may extend
- * one or more erase sectors. Return an error is there is a problem erasing.
- */
-static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
+static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
- struct m25p *flash = mtd_to_m25p(mtd);
- u32 addr,len;
- uint32_t rem;
-
- pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev),
- __func__, (long long)instr->addr,
- (long long)instr->len);
-
- div_u64_rem(instr->len, mtd->erasesize, &rem);
- if (rem)
- return -EINVAL;
-
- addr = instr->addr;
- len = instr->len;
-
- mutex_lock(&flash->lock);
-
- /* whole-chip erase? */
- if (len == flash->mtd.size) {
- if (erase_chip(flash)) {
- instr->state = MTD_ERASE_FAILED;
- mutex_unlock(&flash->lock);
- return -EIO;
- }
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+ struct spi_transfer t[2] = {};
+ struct spi_message m;
+ int cmd_sz = m25p_cmdsz(nor);
- /* REVISIT in some cases we could speed up erasing large regions
- * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
- * to use "small sector erase", but that's not always optimal.
- */
+ spi_message_init(&m);
- /* "sector"-at-a-time erase */
- } else {
- while (len) {
- if (erase_sector(flash, addr)) {
- instr->state = MTD_ERASE_FAILED;
- mutex_unlock(&flash->lock);
- return -EIO;
- }
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ cmd_sz = 1;
- addr += mtd->erasesize;
- len -= mtd->erasesize;
- }
- }
+ flash->command[0] = nor->program_opcode;
+ m25p_addr2cmd(nor, to, flash->command);
- mutex_unlock(&flash->lock);
+ t[0].tx_buf = flash->command;
+ t[0].len = cmd_sz;
+ spi_message_add_tail(&t[0], &m);
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ t[1].tx_buf = buf;
+ t[1].len = len;
+ spi_message_add_tail(&t[1], &m);
- return 0;
-}
+ spi_sync(spi, &m);
-/*
- * Dummy Cycle calculation for different type of read.
- * It can be used to support more commands with
- * different dummy cycle requirements.
- */
-static inline int m25p80_dummy_cycles_read(struct m25p *flash)
-{
- switch (flash->flash_read) {
- case M25P80_FAST:
- case M25P80_DUAL:
- case M25P80_QUAD:
- return 1;
- case M25P80_NORMAL:
- return 0;
- default:
- dev_err(&flash->spi->dev, "No valid read type supported\n");
- return -1;
- }
+ *retlen += m.actual_length - cmd_sz;
}
-static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
+static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
{
- switch (flash->flash_read) {
- case M25P80_DUAL:
+ switch (nor->flash_read) {
+ case SPI_NOR_DUAL:
return 2;
- case M25P80_QUAD:
+ case SPI_NOR_QUAD:
return 4;
default:
return 0;
@@ -505,590 +118,72 @@ static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
}
/*
- * Read an address range from the flash chip. The address range
+ * Read an address range from the NOR chip. The address range
* may be any size provided it is within the physical boundaries.
*/
-static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct m25p *flash = mtd_to_m25p(mtd);
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
struct spi_transfer t[2];
struct spi_message m;
- uint8_t opcode;
- int dummy;
+ int dummy = nor->read_dummy;
+ int ret;
- pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
- __func__, (u32)from, len);
+ /* Wait till previous write/erase is done. */
+ ret = nor->wait_till_ready(nor);
+ if (ret)
+ return ret;
spi_message_init(&m);
memset(t, 0, (sizeof t));
- dummy = m25p80_dummy_cycles_read(flash);
- if (dummy < 0) {
- dev_err(&flash->spi->dev, "No valid read command supported\n");
- return -EINVAL;
- }
+ flash->command[0] = nor->read_opcode;
+ m25p_addr2cmd(nor, from, flash->command);
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + dummy;
+ t[0].len = m25p_cmdsz(nor) + dummy;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
- t[1].rx_nbits = m25p80_rx_nbits(flash);
+ t[1].rx_nbits = m25p80_rx_nbits(nor);
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- mutex_lock(&flash->lock);
-
- /* Wait till previous write/erase is done. */
- if (wait_till_ready(flash)) {
- /* REVISIT status return?? */
- mutex_unlock(&flash->lock);
- return 1;
- }
-
- /* Set up the write data buffer. */
- opcode = flash->read_opcode;
- flash->command[0] = opcode;
- m25p_addr2cmd(flash, from, flash->command);
-
- spi_sync(flash->spi, &m);
-
- *retlen = m.actual_length - m25p_cmdsz(flash) - dummy;
-
- mutex_unlock(&flash->lock);
+ spi_sync(spi, &m);
+ *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
return 0;
}
-/*
- * Write an address range to the flash chip. Data must be written in
- * FLASH_PAGESIZE chunks. The address range may be any size provided
- * it is within the physical boundaries.
- */
-static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int m25p80_erase(struct spi_nor *nor, loff_t offset)
{
- struct m25p *flash = mtd_to_m25p(mtd);
- u32 page_offset, page_size;
- struct spi_transfer t[2];
- struct spi_message m;
-
- pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
- __func__, (u32)to, len);
-
- spi_message_init(&m);
- memset(t, 0, (sizeof t));
-
- t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash);
- spi_message_add_tail(&t[0], &m);
-
- t[1].tx_buf = buf;
- spi_message_add_tail(&t[1], &m);
-
- mutex_lock(&flash->lock);
-
- /* Wait until finished previous write command. */
- if (wait_till_ready(flash)) {
- mutex_unlock(&flash->lock);
- return 1;
- }
-
- write_enable(flash);
-
- /* Set up the opcode in the write buffer. */
- flash->command[0] = flash->program_opcode;
- m25p_addr2cmd(flash, to, flash->command);
-
- page_offset = to & (flash->page_size - 1);
-
- /* do all the bytes fit onto one page? */
- if (page_offset + len <= flash->page_size) {
- t[1].len = len;
-
- spi_sync(flash->spi, &m);
-
- *retlen = m.actual_length - m25p_cmdsz(flash);
- } else {
- u32 i;
-
- /* the size of data remaining on the first page */
- page_size = flash->page_size - page_offset;
-
- t[1].len = page_size;
- spi_sync(flash->spi, &m);
-
- *retlen = m.actual_length - m25p_cmdsz(flash);
-
- /* write everything in flash->page_size chunks */
- for (i = page_size; i < len; i += page_size) {
- page_size = len - i;
- if (page_size > flash->page_size)
- page_size = flash->page_size;
-
- /* write the next page to flash */
- m25p_addr2cmd(flash, to + i, flash->command);
-
- t[1].tx_buf = buf + i;
- t[1].len = page_size;
-
- wait_till_ready(flash);
-
- write_enable(flash);
-
- spi_sync(flash->spi, &m);
-
- *retlen += m.actual_length - m25p_cmdsz(flash);
- }
- }
-
- mutex_unlock(&flash->lock);
-
- return 0;
-}
-
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct m25p *flash = mtd_to_m25p(mtd);
- struct spi_transfer t[2];
- struct spi_message m;
- size_t actual;
- int cmd_sz, ret;
-
- pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
- __func__, (u32)to, len);
-
- spi_message_init(&m);
- memset(t, 0, (sizeof t));
-
- t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash);
- spi_message_add_tail(&t[0], &m);
-
- t[1].tx_buf = buf;
- spi_message_add_tail(&t[1], &m);
+ struct m25p *flash = nor->priv;
+ int ret;
- mutex_lock(&flash->lock);
+ dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
+ flash->mtd.erasesize / 1024, (u32)offset);
/* Wait until finished previous write command. */
- ret = wait_till_ready(flash);
+ ret = nor->wait_till_ready(nor);
if (ret)
- goto time_out;
-
- write_enable(flash);
-
- actual = to % 2;
- /* Start write from odd address. */
- if (actual) {
- flash->command[0] = OPCODE_BP;
- m25p_addr2cmd(flash, to, flash->command);
-
- /* write one byte. */
- t[1].len = 1;
- spi_sync(flash->spi, &m);
- ret = wait_till_ready(flash);
- if (ret)
- goto time_out;
- *retlen += m.actual_length - m25p_cmdsz(flash);
- }
- to += actual;
-
- flash->command[0] = OPCODE_AAI_WP;
- m25p_addr2cmd(flash, to, flash->command);
-
- /* Write out most of the data here. */
- cmd_sz = m25p_cmdsz(flash);
- for (; actual < len - 1; actual += 2) {
- t[0].len = cmd_sz;
- /* write two bytes. */
- t[1].len = 2;
- t[1].tx_buf = buf + actual;
+ return ret;
- spi_sync(flash->spi, &m);
- ret = wait_till_ready(flash);
- if (ret)
- goto time_out;
- *retlen += m.actual_length - cmd_sz;
- cmd_sz = 1;
- to += 2;
- }
- write_disable(flash);
- ret = wait_till_ready(flash);
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
if (ret)
- goto time_out;
-
- /* Write out trailing byte if it exists. */
- if (actual != len) {
- write_enable(flash);
- flash->command[0] = OPCODE_BP;
- m25p_addr2cmd(flash, to, flash->command);
- t[0].len = m25p_cmdsz(flash);
- t[1].len = 1;
- t[1].tx_buf = buf + actual;
-
- spi_sync(flash->spi, &m);
- ret = wait_till_ready(flash);
- if (ret)
- goto time_out;
- *retlen += m.actual_length - m25p_cmdsz(flash);
- write_disable(flash);
- }
-
-time_out:
- mutex_unlock(&flash->lock);
- return ret;
-}
-
-static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct m25p *flash = mtd_to_m25p(mtd);
- uint32_t offset = ofs;
- uint8_t status_old, status_new;
- int res = 0;
-
- mutex_lock(&flash->lock);
- /* Wait until finished previous command */
- if (wait_till_ready(flash)) {
- res = 1;
- goto err;
- }
-
- status_old = read_sr(flash);
-
- if (offset < flash->mtd.size-(flash->mtd.size/2))
- status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
- else if (offset < flash->mtd.size-(flash->mtd.size/4))
- status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
- else if (offset < flash->mtd.size-(flash->mtd.size/8))
- status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
- else if (offset < flash->mtd.size-(flash->mtd.size/16))
- status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
- else if (offset < flash->mtd.size-(flash->mtd.size/32))
- status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
- else if (offset < flash->mtd.size-(flash->mtd.size/64))
- status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
- else
- status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
-
- /* Only modify protection if it will not unlock other areas */
- if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
- (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
- write_enable(flash);
- if (write_sr(flash, status_new) < 0) {
- res = 1;
- goto err;
- }
- }
-
-err: mutex_unlock(&flash->lock);
- return res;
-}
-
-static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct m25p *flash = mtd_to_m25p(mtd);
- uint32_t offset = ofs;
- uint8_t status_old, status_new;
- int res = 0;
-
- mutex_lock(&flash->lock);
- /* Wait until finished previous command */
- if (wait_till_ready(flash)) {
- res = 1;
- goto err;
- }
-
- status_old = read_sr(flash);
-
- if (offset+len > flash->mtd.size-(flash->mtd.size/64))
- status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
- else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
- status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
- else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
- status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
- else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
- status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
- else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
- status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
- else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
- status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
- else
- status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
-
- /* Only modify protection if it will not lock other areas */
- if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
- (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
- write_enable(flash);
- if (write_sr(flash, status_new) < 0) {
- res = 1;
- goto err;
- }
- }
-
-err: mutex_unlock(&flash->lock);
- return res;
-}
-
-/****************************************************************************/
-
-/*
- * SPI device driver setup and teardown
- */
-
-struct flash_info {
- /* JEDEC id zero means "no ID" (most older chips); otherwise it has
- * a high byte of zero plus three data bytes: the manufacturer id,
- * then a two byte device id.
- */
- u32 jedec_id;
- u16 ext_id;
-
- /* The size listed here is what works with OPCODE_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u16 flags;
-#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
-#define M25P_NO_ERASE 0x02 /* No erase command needed */
-#define SST_WRITE 0x04 /* use SST byte programming */
-#define M25P_NO_FR 0x08 /* Can't do fastread */
-#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
-#define M25P80_DUAL_READ 0x20 /* Flash supports Dual Read */
-#define M25P80_QUAD_READ 0x40 /* Flash supports Quad Read */
-};
-
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
- .jedec_id = (_jedec_id), \
- .ext_id = (_ext_id), \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags), \
- })
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .addr_width = (_addr_width), \
- .flags = (_flags), \
- })
-
-/* NOTE: double check command sets and memory organization when you add
- * more flash chips. This current list focusses on newer chips, which
- * have been converging on command sets which including JEDEC ID.
- */
-static const struct spi_device_id m25p_ids[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
-
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
-
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
-
- /* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
-
- /* Everspin */
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
-
- /* GigaDevice */
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
-
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
-
- /* Macronix */
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, M25P80_QUAD_READ) },
-
- /* Micron */
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
-
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
-
- /* Spansion -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_DUAL_READ | M25P80_QUAD_READ) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_DUAL_READ | M25P80_QUAD_READ) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
-
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
-
- /* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
-
- /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
-
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1, M25P_NO_ERASE | M25P_NO_FR) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2, M25P_NO_ERASE | M25P_NO_FR) },
- { },
-};
-MODULE_DEVICE_TABLE(spi, m25p_ids);
-
-static const struct spi_device_id *jedec_probe(struct spi_device *spi)
-{
- int tmp;
- u8 code = OPCODE_RDID;
- u8 id[5];
- u32 jedec;
- u16 ext_jedec;
- struct flash_info *info;
+ return ret;
- /* JEDEC also defines an optional "extended device information"
- * string for after vendor-specific data, after the three bytes
- * we use here. Supporting some chips might require using it.
- */
- tmp = spi_write_then_read(spi, &code, 1, id, 5);
- if (tmp < 0) {
- pr_debug("%s: error %d reading JEDEC ID\n",
- dev_name(&spi->dev), tmp);
- return ERR_PTR(tmp);
- }
- jedec = id[0];
- jedec = jedec << 8;
- jedec |= id[1];
- jedec = jedec << 8;
- jedec |= id[2];
+ /* Set up command buffer. */
+ flash->command[0] = nor->erase_opcode;
+ m25p_addr2cmd(nor, offset, flash->command);
- ext_jedec = id[3] << 8 | id[4];
+ spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
- for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
- info = (void *)m25p_ids[tmp].driver_data;
- if (info->jedec_id == jedec) {
- if (info->ext_id == 0 || info->ext_id == ext_jedec)
- return &m25p_ids[tmp];
- }
- }
- dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
- return ERR_PTR(-ENODEV);
+ return 0;
}
-
/*
* board specific setup should have ensured the SPI clock used here
* matches what the READ command supports, at least until this driver
@@ -1096,231 +191,45 @@ static const struct spi_device_id *jedec_probe(struct spi_device *spi)
*/
static int m25p_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
- struct flash_platform_data *data;
- struct m25p *flash;
- struct flash_info *info;
- unsigned i;
struct mtd_part_parser_data ppdata;
- struct device_node *np = spi->dev.of_node;
+ struct flash_platform_data *data;
+ struct m25p *flash;
+ struct spi_nor *nor;
+ enum read_mode mode = SPI_NOR_NORMAL;
int ret;
- /* Platform data helps sort out which chip type we have, as
- * well as how this board partitions it. If we don't have
- * a chip ID, try the JEDEC id commands; they'll work for most
- * newer chips, even if we don't recognize the particular chip.
- */
- data = dev_get_platdata(&spi->dev);
- if (data && data->type) {
- const struct spi_device_id *plat_id;
-
- for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
- plat_id = &m25p_ids[i];
- if (strcmp(data->type, plat_id->name))
- continue;
- break;
- }
-
- if (i < ARRAY_SIZE(m25p_ids) - 1)
- id = plat_id;
- else
- dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
- }
-
- info = (void *)id->driver_data;
-
- if (info->jedec_id) {
- const struct spi_device_id *jid;
-
- jid = jedec_probe(spi);
- if (IS_ERR(jid)) {
- return PTR_ERR(jid);
- } else if (jid != id) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to lose that
- * information, even if it's not 100% accurate.
- */
- dev_warn(&spi->dev, "found %s, expected %s\n",
- jid->name, id->name);
- id = jid;
- info = (void *)jid->driver_data;
- }
- }
-
flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
- if (!flash->command)
- return -ENOMEM;
-
- flash->spi = spi;
- mutex_init(&flash->lock);
- spi_set_drvdata(spi, flash);
-
- /*
- * Atmel, SST and Intel/Numonyx serial flash tend to power
- * up with the software protection bits set
- */
+ nor = &flash->spi_nor;
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
- write_enable(flash);
- write_sr(flash, 0);
- }
-
- if (data && data->name)
- flash->mtd.name = data->name;
- else
- flash->mtd.name = dev_name(&spi->dev);
-
- flash->mtd.type = MTD_NORFLASH;
- flash->mtd.writesize = 1;
- flash->mtd.flags = MTD_CAP_NORFLASH;
- flash->mtd.size = info->sector_size * info->n_sectors;
- flash->mtd._erase = m25p80_erase;
- flash->mtd._read = m25p80_read;
-
- /* flash protection support for STmicro chips */
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
- flash->mtd._lock = m25p80_lock;
- flash->mtd._unlock = m25p80_unlock;
- }
+ /* install the hooks */
+ nor->read = m25p80_read;
+ nor->write = m25p80_write;
+ nor->erase = m25p80_erase;
+ nor->write_reg = m25p80_write_reg;
+ nor->read_reg = m25p80_read_reg;
- /* sst flash chips use AAI word program */
- if (info->flags & SST_WRITE)
- flash->mtd._write = sst_write;
- else
- flash->mtd._write = m25p80_write;
+ nor->dev = &spi->dev;
+ nor->mtd = &flash->mtd;
+ nor->priv = flash;
- /* prefer "small sector" erase if possible */
- if (info->flags & SECT_4K) {
- flash->erase_opcode = OPCODE_BE_4K;
- flash->mtd.erasesize = 4096;
- } else if (info->flags & SECT_4K_PMC) {
- flash->erase_opcode = OPCODE_BE_4K_PMC;
- flash->mtd.erasesize = 4096;
- } else {
- flash->erase_opcode = OPCODE_SE;
- flash->mtd.erasesize = info->sector_size;
- }
+ spi_set_drvdata(spi, flash);
+ flash->mtd.priv = nor;
+ flash->spi = spi;
- if (info->flags & M25P_NO_ERASE)
- flash->mtd.flags |= MTD_NO_ERASE;
+ if (spi->mode & SPI_RX_QUAD)
+ mode = SPI_NOR_QUAD;
+ else if (spi->mode & SPI_RX_DUAL)
+ mode = SPI_NOR_DUAL;
+ ret = spi_nor_scan(nor, spi_get_device_id(spi), mode);
+ if (ret)
+ return ret;
+ data = dev_get_platdata(&spi->dev);
ppdata.of_node = spi->dev.of_node;
- flash->mtd.dev.parent = &spi->dev;
- flash->page_size = info->page_size;
- flash->mtd.writebufsize = flash->page_size;
-
- if (np) {
- /* If we were instantiated by DT, use it */
- if (of_property_read_bool(np, "m25p,fast-read"))
- flash->flash_read = M25P80_FAST;
- else
- flash->flash_read = M25P80_NORMAL;
- } else {
- /* If we weren't instantiated by DT, default to fast-read */
- flash->flash_read = M25P80_FAST;
- }
-
- /* Some devices cannot do fast-read, no matter what DT tells us */
- if (info->flags & M25P_NO_FR)
- flash->flash_read = M25P80_NORMAL;
-
- /* Quad/Dual-read mode takes precedence over fast/normal */
- if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) {
- ret = set_quad_mode(flash, info->jedec_id);
- if (ret) {
- dev_err(&flash->spi->dev, "quad mode not supported\n");
- return ret;
- }
- flash->flash_read = M25P80_QUAD;
- } else if (spi->mode & SPI_RX_DUAL && info->flags & M25P80_DUAL_READ) {
- flash->flash_read = M25P80_DUAL;
- }
- /* Default commands */
- switch (flash->flash_read) {
- case M25P80_QUAD:
- flash->read_opcode = OPCODE_QUAD_READ;
- break;
- case M25P80_DUAL:
- flash->read_opcode = OPCODE_DUAL_READ;
- break;
- case M25P80_FAST:
- flash->read_opcode = OPCODE_FAST_READ;
- break;
- case M25P80_NORMAL:
- flash->read_opcode = OPCODE_NORM_READ;
- break;
- default:
- dev_err(&flash->spi->dev, "No Read opcode defined\n");
- return -EINVAL;
- }
-
- flash->program_opcode = OPCODE_PP;
-
- if (info->addr_width)
- flash->addr_width = info->addr_width;
- else if (flash->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- flash->addr_width = 4;
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
- /* Dedicated 4-byte command set */
- switch (flash->flash_read) {
- case M25P80_QUAD:
- flash->read_opcode = OPCODE_QUAD_READ_4B;
- break;
- case M25P80_DUAL:
- flash->read_opcode = OPCODE_DUAL_READ_4B;
- break;
- case M25P80_FAST:
- flash->read_opcode = OPCODE_FAST_READ_4B;
- break;
- case M25P80_NORMAL:
- flash->read_opcode = OPCODE_NORM_READ_4B;
- break;
- }
- flash->program_opcode = OPCODE_PP_4B;
- /* No small sector erase for 4-byte command set */
- flash->erase_opcode = OPCODE_SE_4B;
- flash->mtd.erasesize = info->sector_size;
- } else
- set_4byte(flash, info->jedec_id, 1);
- } else {
- flash->addr_width = 3;
- }
-
- dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
- (long long)flash->mtd.size >> 10);
-
- pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- flash->mtd.name,
- (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
- flash->mtd.erasesize, flash->mtd.erasesize / 1024,
- flash->mtd.numeraseregions);
-
- if (flash->mtd.numeraseregions)
- for (i = 0; i < flash->mtd.numeraseregions; i++)
- pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)flash->mtd.eraseregions[i].offset,
- flash->mtd.eraseregions[i].erasesize,
- flash->mtd.eraseregions[i].erasesize / 1024,
- flash->mtd.eraseregions[i].numblocks);
-
-
- /* partitions should match sector boundaries; and it may be good to
- * use readonly partitions for writeprotected sectors (BP2..BP0).
- */
return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
data ? data->parts : NULL,
data ? data->nr_parts : 0);
@@ -1341,7 +250,7 @@ static struct spi_driver m25p80_driver = {
.name = "m25p80",
.owner = THIS_MODULE,
},
- .id_table = m25p_ids,
+ .id_table = spi_nor_ids,
.probe = m25p_probe,
.remove = m25p_remove,
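
The conversion above is the general pattern for spi-nor front-end drivers: allocate a struct spi_nor, point its read/write/erase/read_reg/write_reg hooks at controller-specific helpers, and let spi_nor_scan() identify the chip and fill in opcodes, address width and geometry. A condensed sketch of the wiring (the my_* names are hypothetical; the hook signatures follow the ones used above):

/* Condensed sketch; my_read/my_write/... stand in for controller helpers. */
static int my_probe(struct spi_device *spi)
{
	struct m25p *flash;
	struct spi_nor *nor;
	int ret;

	flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
	if (!flash)
		return -ENOMEM;

	nor = &flash->spi_nor;
	nor->read = my_read;		/* like m25p80_read() */
	nor->write = my_write;		/* like m25p80_write() */
	nor->erase = my_erase;		/* like m25p80_erase() */
	nor->read_reg = my_read_reg;	/* like m25p80_read_reg() */
	nor->write_reg = my_write_reg;	/* like m25p80_write_reg() */
	nor->dev = &spi->dev;
	nor->mtd = &flash->mtd;
	nor->priv = flash;
	flash->mtd.priv = nor;
	flash->spi = spi;

	/* Identify the chip; spi_nor_scan() fills in opcodes and geometry. */
	ret = spi_nor_scan(nor, spi_get_device_id(spi), SPI_NOR_NORMAL);
	if (ret)
		return ret;

	return mtd_device_register(&flash->mtd, NULL, 0);
}
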
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
index 4f0c2c7c898e..f59a125295d0 100644
--- a/drivers/mtd/devices/serial_flash_cmds.h
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -13,43 +13,23 @@
#define _MTD_SERIAL_FLASH_CMDS_H
/* Generic Flash Commands/OPCODEs */
-#define FLASH_CMD_WREN 0x06
-#define FLASH_CMD_WRDI 0x04
-#define FLASH_CMD_RDID 0x9f
-#define FLASH_CMD_RDSR 0x05
-#define FLASH_CMD_RDSR2 0x35
-#define FLASH_CMD_WRSR 0x01
-#define FLASH_CMD_SE_4K 0x20
-#define FLASH_CMD_SE_32K 0x52
-#define FLASH_CMD_SE 0xd8
-#define FLASH_CMD_CHIPERASE 0xc7
-#define FLASH_CMD_WRVCR 0x81
-#define FLASH_CMD_RDVCR 0x85
+#define SPINOR_OP_RDSR2 0x35
+#define SPINOR_OP_WRVCR 0x81
+#define SPINOR_OP_RDVCR 0x85
/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
-#define FLASH_CMD_READ 0x03 /* READ */
-#define FLASH_CMD_READ_FAST 0x0b /* FAST READ */
-#define FLASH_CMD_READ_1_1_2 0x3b /* DUAL OUTPUT READ */
-#define FLASH_CMD_READ_1_2_2 0xbb /* DUAL I/O READ */
-#define FLASH_CMD_READ_1_1_4 0x6b /* QUAD OUTPUT READ */
-#define FLASH_CMD_READ_1_4_4 0xeb /* QUAD I/O READ */
+#define SPINOR_OP_READ_1_2_2 0xbb /* DUAL I/O READ */
+#define SPINOR_OP_READ_1_4_4 0xeb /* QUAD I/O READ */
-#define FLASH_CMD_WRITE 0x02 /* PAGE PROGRAM */
-#define FLASH_CMD_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
-#define FLASH_CMD_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
-#define FLASH_CMD_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
-#define FLASH_CMD_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
-
-#define FLASH_CMD_EN4B_ADDR 0xb7 /* Enter 4-byte address mode */
-#define FLASH_CMD_EX4B_ADDR 0xe9 /* Exit 4-byte address mode */
+#define SPINOR_OP_WRITE 0x02 /* PAGE PROGRAM */
+#define SPINOR_OP_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
+#define SPINOR_OP_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
/* READ commands with 32-bit addressing */
-#define FLASH_CMD_READ4 0x13
-#define FLASH_CMD_READ4_FAST 0x0c
-#define FLASH_CMD_READ4_1_1_2 0x3c
-#define FLASH_CMD_READ4_1_2_2 0xbc
-#define FLASH_CMD_READ4_1_1_4 0x6c
-#define FLASH_CMD_READ4_1_4_4 0xec
+#define SPINOR_OP_READ4_1_2_2 0xbc
+#define SPINOR_OP_READ4_1_4_4 0xec
/* Configuration flags */
#define FLASH_FLAG_SINGLE 0x000000ff
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 5a5cd2ace4a6..2fc4957cbe7f 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -280,14 +280,11 @@ __setup("slram=", mtd_slram_setup);
static int __init init_slram(void)
{
char *devname;
- int i;
#ifndef MODULE
char *devstart;
char *devlength;
- i = 0;
-
if (!map) {
E("slram: not enough parameters.\n");
return(-EINVAL);
@@ -314,6 +311,7 @@ static int __init init_slram(void)
}
#else
int count;
+ int i;
for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
count++) {
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 1957d7c8e185..d252514d3e98 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -201,44 +202,6 @@
#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
-/* Flash Commands */
-#define FLASH_CMD_WREN 0x06
-#define FLASH_CMD_WRDI 0x04
-#define FLASH_CMD_RDID 0x9f
-#define FLASH_CMD_RDSR 0x05
-#define FLASH_CMD_RDSR2 0x35
-#define FLASH_CMD_WRSR 0x01
-#define FLASH_CMD_SE_4K 0x20
-#define FLASH_CMD_SE_32K 0x52
-#define FLASH_CMD_SE 0xd8
-#define FLASH_CMD_CHIPERASE 0xc7
-#define FLASH_CMD_WRVCR 0x81
-#define FLASH_CMD_RDVCR 0x85
-
-#define FLASH_CMD_READ 0x03 /* READ */
-#define FLASH_CMD_READ_FAST 0x0b /* FAST READ */
-#define FLASH_CMD_READ_1_1_2 0x3b /* DUAL OUTPUT READ */
-#define FLASH_CMD_READ_1_2_2 0xbb /* DUAL I/O READ */
-#define FLASH_CMD_READ_1_1_4 0x6b /* QUAD OUTPUT READ */
-#define FLASH_CMD_READ_1_4_4 0xeb /* QUAD I/O READ */
-
-#define FLASH_CMD_WRITE 0x02 /* PAGE PROGRAM */
-#define FLASH_CMD_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
-#define FLASH_CMD_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
-#define FLASH_CMD_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
-#define FLASH_CMD_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
-
-#define FLASH_CMD_EN4B_ADDR 0xb7 /* Enter 4-byte address mode */
-#define FLASH_CMD_EX4B_ADDR 0xe9 /* Exit 4-byte address mode */
-
-/* READ commands with 32-bit addressing (N25Q256 and S25FLxxxS) */
-#define FLASH_CMD_READ4 0x13
-#define FLASH_CMD_READ4_FAST 0x0c
-#define FLASH_CMD_READ4_1_1_2 0x3c
-#define FLASH_CMD_READ4_1_2_2 0xbc
-#define FLASH_CMD_READ4_1_1_4 0x6c
-#define FLASH_CMD_READ4_1_4_4 0xec
-
/* S25FLxxxS commands */
#define S25FL_CMD_WRITE4_1_1_4 0x34
#define S25FL_CMD_SE4 0xdc
@@ -246,7 +209,7 @@
#define S25FL_CMD_DYBWR 0xe1
#define S25FL_CMD_DYBRD 0xe0
#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
- * 'FLASH_CMD_WRITE_1_4_4'
+ * 'SPINOR_OP_WRITE_1_4_4'
* as found on N25Qxxx devices! */
/* Status register */
@@ -261,6 +224,12 @@
#define S25FL_STATUS_E_ERR 0x20
#define S25FL_STATUS_P_ERR 0x40
+#define N25Q_CMD_WRVCR 0x81
+#define N25Q_CMD_RDVCR 0x85
+#define N25Q_CMD_RDVECR 0x65
+#define N25Q_CMD_RDNVCR 0xb5
+#define N25Q_CMD_WRNVCR 0xb1
+
#define FLASH_PAGESIZE 256 /* In Bytes */
#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
@@ -270,7 +239,6 @@
*/
#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
-#define CFG_WRITE_EX_32BIT_ADDR_DELAY 0x00000004
#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
@@ -329,7 +297,7 @@ struct flash_info {
u32 jedec_id;
u16 ext_id;
/*
- * The size listed here is what works with FLASH_CMD_SE, which isn't
+ * The size listed here is what works with SPINOR_OP_SE, which isn't
* necessarily called a "sector" by the vendor.
*/
unsigned sector_size;
@@ -369,17 +337,26 @@ static struct flash_info flash_types[] = {
{ "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
{ "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
+ /* Macronix MX25xxx
+ * - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted on devices
+ *   where using it would require a reduced operating frequency.
+ */
#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_READ_1_2_2 | \
FLASH_FLAG_READ_1_1_4 | \
- FLASH_FLAG_READ_1_4_4 | \
FLASH_FLAG_SE_4K | \
FLASH_FLAG_SE_32K)
+ { "mx25l3255e", 0xc29e16, 0, 64 * 1024, 64,
+ (MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86,
+ stfsm_mx25_config },
{ "mx25l25635e", 0xc22019, 0, 64*1024, 512,
(MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
stfsm_mx25_config },
+ { "mx25l25655e", 0xc22619, 0, 64*1024, 512,
+ (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+ stfsm_mx25_config },
#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
@@ -407,6 +384,8 @@ static struct flash_info flash_types[] = {
FLASH_FLAG_READ_1_4_4 | \
FLASH_FLAG_WRITE_1_1_4 | \
FLASH_FLAG_READ_FAST)
+ { "s25fl032p", 0x010215, 0x4d00, 64 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
{ "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
stfsm_s25fl_config },
{ "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
@@ -473,22 +452,22 @@ static struct flash_info flash_types[] = {
/* Default READ configurations, in order of preference */
static struct seq_rw_config default_read_configs[] = {
- {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
- {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
- {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
- {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_FAST, FLASH_CMD_READ_FAST, 0, 1, 1, 0x00, 0, 8},
- {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ, 0, 1, 1, 0x00, 0, 0},
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/* Default WRITE configurations */
static struct seq_rw_config default_write_configs[] = {
- {FLASH_FLAG_WRITE_1_4_4, FLASH_CMD_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
- {FLASH_FLAG_WRITE_1_1_4, FLASH_CMD_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
- {FLASH_FLAG_WRITE_1_2_2, FLASH_CMD_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
- {FLASH_FLAG_WRITE_1_1_2, FLASH_CMD_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
- {FLASH_FLAG_READ_WRITE, FLASH_CMD_WRITE, 1, 1, 1, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_WRITE, 1, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
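Editorial note: each row of these tables pairs a capability flag with the opcode and bus geometry used to implement it. A single annotated row, assuming the field layout of the driver's 'struct seq_rw_config' (flags, cmd, write, addr_pads, data_pads, mode_data, mode_cycles, dummy_cycles):

        static struct seq_rw_config quad_io_read_example = {
                FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, /* read */
                4, 4,           /* address and data phases on 4 pads */
                0x00, 2,        /* mode byte 0x00, 2 mode cycles */
                4,              /* 4 dummy cycles */
        };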
@@ -511,12 +490,12 @@ static struct seq_rw_config default_write_configs[] = {
* cycles.
*/
static struct seq_rw_config n25q_read3_configs[] = {
- {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_FAST, FLASH_CMD_READ_FAST, 0, 1, 1, 0x00, 0, 8},
- {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ, 0, 1, 1, 0x00, 0, 0},
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
@@ -526,12 +505,12 @@ static struct seq_rw_config n25q_read3_configs[] = {
* - 'FAST' variants configured for 8 dummy cycles (see note above.)
*/
static struct seq_rw_config n25q_read4_configs[] = {
- {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_FAST, FLASH_CMD_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
- {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ4, 0, 1, 1, 0x00, 0, 0},
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
@@ -544,7 +523,7 @@ static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
{
seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_EN4B_ADDR) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B) |
SEQ_OPC_CSDEASSERT);
seq->seq[0] = STFSM_INST_CMD1;
@@ -572,12 +551,12 @@ static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
* entering a state that is incompatible with the SPIBoot Controller.
*/
static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
- {FLASH_FLAG_READ_1_4_4, FLASH_CMD_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
- {FLASH_FLAG_READ_1_1_4, FLASH_CMD_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_2_2, FLASH_CMD_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
- {FLASH_FLAG_READ_1_1_2, FLASH_CMD_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_FAST, FLASH_CMD_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
- {FLASH_FLAG_READ_WRITE, FLASH_CMD_READ4, 0, 1, 1, 0x00, 0, 0},
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
@@ -590,13 +569,13 @@ static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
/*
* [W25Qxxx] Configuration
*/
-#define W25Q_STATUS_QE (0x1 << 9)
+#define W25Q_STATUS_QE (0x1 << 1)
static struct stfsm_seq stfsm_seq_read_jedec = {
.data_size = TRANSFER_SIZE(8),
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_RDID)),
+ SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_DATA_READ,
@@ -612,7 +591,7 @@ static struct stfsm_seq stfsm_seq_read_status_fifo = {
.data_size = TRANSFER_SIZE(4),
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_RDSR)),
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_DATA_READ,
@@ -628,10 +607,10 @@ static struct stfsm_seq stfsm_seq_erase_sector = {
/* 'addr_cfg' configured during initialisation */
.seq_opc = {
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_SE)),
+ SEQ_OPC_OPCODE(SPINOR_OP_SE)),
},
.seq = {
STFSM_INST_CMD1,
@@ -649,10 +628,10 @@ static struct stfsm_seq stfsm_seq_erase_sector = {
static struct stfsm_seq stfsm_seq_erase_chip = {
.seq_opc = {
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_CHIPERASE) | SEQ_OPC_CSDEASSERT),
+ SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT),
},
.seq = {
STFSM_INST_CMD1,
@@ -669,26 +648,9 @@ static struct stfsm_seq stfsm_seq_erase_chip = {
static struct stfsm_seq stfsm_seq_write_status = {
.seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
- .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WRSR)),
- .seq = {
- STFSM_INST_CMD1,
- STFSM_INST_CMD2,
- STFSM_INST_STA_WR1,
- STFSM_INST_STOP,
- },
- .seq_cfg = (SEQ_CFG_PADS_1 |
- SEQ_CFG_READNOTWRITE |
- SEQ_CFG_CSDEASSERT |
- SEQ_CFG_STARTSEQ),
-};
-
-static struct stfsm_seq stfsm_seq_wrvcr = {
- .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) | SEQ_OPC_CSDEASSERT),
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
.seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WRVCR)),
+ SEQ_OPC_OPCODE(SPINOR_OP_WRSR)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_CMD2,
@@ -704,9 +666,9 @@ static struct stfsm_seq stfsm_seq_wrvcr = {
static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
{
seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_EN4B_ADDR));
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B));
seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
SEQ_OPC_CSDEASSERT);
seq->seq[0] = STFSM_INST_CMD2;
@@ -793,7 +755,7 @@ static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
- BUG_ON((((uint32_t)buf) & 0x3) || (size & 0x3));
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
while (remaining) {
for (;;) {
@@ -817,7 +779,7 @@ static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
- BUG_ON((((uint32_t)buf) & 0x3) || (size & 0x3));
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
@@ -827,7 +789,7 @@ static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
{
struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
- uint32_t cmd = enter ? FLASH_CMD_EN4B_ADDR : FLASH_CMD_EX4B_ADDR;
+ uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
@@ -851,7 +813,7 @@ static uint8_t stfsm_wait_busy(struct stfsm *fsm)
/* Use RDRS1 */
seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_RDSR));
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR));
/* Load read_status sequence */
stfsm_load_seq(fsm, seq);
@@ -889,60 +851,57 @@ static uint8_t stfsm_wait_busy(struct stfsm *fsm)
}
static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
- uint8_t *status)
+ uint8_t *data, int bytes)
{
struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
uint32_t tmp;
+ uint8_t *t = (uint8_t *)&tmp;
+ int i;
- dev_dbg(fsm->dev, "reading STA[%s]\n",
- (cmd == FLASH_CMD_RDSR) ? "1" : "2");
+ dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n",
+ cmd, bytes);
- seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
- SEQ_OPC_CYCLES(8) |
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(cmd)),
stfsm_load_seq(fsm, seq);
stfsm_read_fifo(fsm, &tmp, 4);
- *status = (uint8_t)(tmp >> 24);
+ for (i = 0; i < bytes; i++)
+ data[i] = t[i];
stfsm_wait_seq(fsm);
return 0;
}
-static int stfsm_write_status(struct stfsm *fsm, uint16_t status,
- int sta_bytes)
+static int stfsm_write_status(struct stfsm *fsm, uint8_t cmd,
+ uint16_t data, int bytes, int wait_busy)
{
struct stfsm_seq *seq = &stfsm_seq_write_status;
- dev_dbg(fsm->dev, "writing STA[%s] 0x%04x\n",
- (sta_bytes == 1) ? "1" : "1+2", status);
-
- seq->status = (uint32_t)status | STA_PADS_1 | STA_CSDEASSERT;
- seq->seq[2] = (sta_bytes == 1) ?
- STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
-
- stfsm_load_seq(fsm, seq);
-
- stfsm_wait_seq(fsm);
+ dev_dbg(fsm->dev,
+ "write 'status' register [0x%02x], %d byte(s), 0x%04x\n"
+ " %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no");
- return 0;
-};
+ BUG_ON(bytes != 1 && bytes != 2);
-static int stfsm_wrvcr(struct stfsm *fsm, uint8_t data)
-{
- struct stfsm_seq *seq = &stfsm_seq_wrvcr;
-
- dev_dbg(fsm->dev, "writing VCR 0x%02x\n", data);
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd));
- seq->status = (STA_DATA_BYTE1(data) | STA_PADS_1 | STA_CSDEASSERT);
+ seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT;
+ seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
stfsm_load_seq(fsm, seq);
stfsm_wait_seq(fsm);
+ if (wait_busy)
+ stfsm_wait_busy(fsm);
+
return 0;
}
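Editorial note: with the opcode, byte count and wait-busy behaviour now passed explicitly, the single-purpose stfsm_wrvcr() helper removed above becomes redundant. A sketch of a caller using the reworked helpers — demo_set_qe and qe_mask are invented for illustration:

        static int demo_set_qe(struct stfsm *fsm, uint8_t qe_mask)
        {
                uint8_t sr;

                stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr, 1); /* 1 byte */
                if (sr & qe_mask)
                        return 0;               /* nothing to do */

                /* 1 byte, then poll the busy flag until the write completes */
                return stfsm_write_status(fsm, SPINOR_OP_WRSR,
                                          sr | qe_mask, 1, 1);
        }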
@@ -1027,7 +986,7 @@ static void stfsm_prepare_rw_seq(struct stfsm *fsm,
if (cfg->write)
seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
SEQ_OPC_CSDEASSERT);
/* Address configuration (24 or 32-bit addresses) */
@@ -1149,31 +1108,36 @@ static int stfsm_mx25_config(struct stfsm *fsm)
stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
soc_reset = stfsm_can_handle_soc_reset(fsm);
- if (soc_reset || !fsm->booted_from_spi) {
+ if (soc_reset || !fsm->booted_from_spi)
/* If we can handle SoC resets, we enable 32-bit address
* mode pervasively */
stfsm_enter_32bit_addr(fsm, 1);
- } else {
+ else
/* Else, enable/disable 32-bit addressing before/after
* each operation */
fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
CFG_WRITE_TOGGLE_32BIT_ADDR |
CFG_ERASESEC_TOGGLE_32BIT_ADDR);
- /* It seems a small delay is required after exiting
- * 32-bit mode following a write operation. The issue
- * is under investigation.
- */
- fsm->configuration |= CFG_WRITE_EX_32BIT_ADDR_DELAY;
- }
}
- /* For QUAD mode, set 'QE' STATUS bit */
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
- stfsm_read_status(fsm, FLASH_CMD_RDSR, &sta);
- sta |= MX25_STATUS_QE;
- stfsm_write_status(fsm, sta, 1);
+ if (!(sta & MX25_STATUS_QE)) {
+ /* Set 'QE' */
+ sta |= MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ } else {
+ if (sta & MX25_STATUS_QE) {
+ /* Clear 'QE' */
+ sta &= ~MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
}
return 0;
@@ -1239,7 +1203,7 @@ static int stfsm_n25q_config(struct stfsm *fsm)
*/
vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
N25Q_VCR_WRAP_CONT);
- stfsm_wrvcr(fsm, vcr);
+ stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0);
return 0;
}
@@ -1297,7 +1261,7 @@ static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
{
struct stfsm_seq seq = {
.seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WREN) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
SEQ_OPC_CSDEASSERT),
.seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
@@ -1337,7 +1301,7 @@ static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
SEQ_OPC_CSDEASSERT),
.seq_opc[1] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
- SEQ_OPC_OPCODE(FLASH_CMD_WRDI) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRDI) |
SEQ_OPC_CSDEASSERT),
.seq = {
STFSM_INST_CMD1,
@@ -1367,6 +1331,7 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
uint32_t offs;
uint16_t sta_wr;
uint8_t sr1, cr1, dyb;
+ int update_sr = 0;
int ret;
if (flags & FLASH_FLAG_32BIT_ADDR) {
@@ -1414,34 +1379,28 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
}
}
- /* Check status of 'QE' bit */
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
- stfsm_read_status(fsm, FLASH_CMD_RDSR2, &cr1);
if (data_pads == 4) {
if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
/* Set 'QE' */
cr1 |= STFSM_S25FL_CONFIG_QE;
- stfsm_read_status(fsm, FLASH_CMD_RDSR, &sr1);
- sta_wr = ((uint16_t)cr1 << 8) | sr1;
-
- stfsm_write_status(fsm, sta_wr, 2);
-
- stfsm_wait_busy(fsm);
+ update_sr = 1;
}
} else {
- if ((cr1 & STFSM_S25FL_CONFIG_QE)) {
+ if (cr1 & STFSM_S25FL_CONFIG_QE) {
/* Clear 'QE' */
cr1 &= ~STFSM_S25FL_CONFIG_QE;
- stfsm_read_status(fsm, FLASH_CMD_RDSR, &sr1);
- sta_wr = ((uint16_t)cr1 << 8) | sr1;
-
- stfsm_write_status(fsm, sta_wr, 2);
-
- stfsm_wait_busy(fsm);
+ update_sr = 1;
}
-
+ }
+ if (update_sr) {
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sta_wr = ((uint16_t)cr1 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1);
}
/*
@@ -1456,27 +1415,36 @@ static int stfsm_s25fl_config(struct stfsm *fsm)
static int stfsm_w25q_config(struct stfsm *fsm)
{
uint32_t data_pads;
- uint16_t sta_wr;
- uint8_t sta1, sta2;
+ uint8_t sr1, sr2;
+ uint16_t sr_wr;
+ int update_sr = 0;
int ret;
ret = stfsm_prepare_rwe_seqs_default(fsm);
if (ret)
return ret;
- /* If using QUAD mode, set QE STATUS bit */
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
- stfsm_read_status(fsm, FLASH_CMD_RDSR, &sta1);
- stfsm_read_status(fsm, FLASH_CMD_RDSR2, &sta2);
-
- sta_wr = ((uint16_t)sta2 << 8) | sta1;
-
- sta_wr |= W25Q_STATUS_QE;
-
- stfsm_write_status(fsm, sta_wr, 2);
-
- stfsm_wait_busy(fsm);
+ if (!(sr2 & W25Q_STATUS_QE)) {
+ /* Set 'QE' */
+ sr2 |= W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ } else {
+ if (sr2 & W25Q_STATUS_QE) {
+ /* Clear 'QE' */
+ sr2 &= ~W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ /* Write status register */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sr_wr = ((uint16_t)sr2 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
}
return 0;
@@ -1506,7 +1474,7 @@ static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
read_mask = (data_pads << 2) - 1;
/* Handle non-aligned buf */
- p = ((uint32_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
+ p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
/* Handle non-aligned size */
size_ub = (size + read_mask) & ~read_mask;
@@ -1528,7 +1496,7 @@ static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
}
/* Handle non-aligned buf */
- if ((uint32_t)buf & 0x3)
+ if ((uintptr_t)buf & 0x3)
memcpy(buf, page_buf, size);
/* Wait for sequence to finish */
@@ -1570,7 +1538,7 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
write_mask = (data_pads << 2) - 1;
/* Handle non-aligned buf */
- if ((uint32_t)buf & 0x3) {
+ if ((uintptr_t)buf & 0x3) {
memcpy(page_buf, buf, size);
p = (uint8_t *)page_buf;
} else {
@@ -1628,11 +1596,8 @@ static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
stfsm_s25fl_clear_status_reg(fsm);
/* Exit 32-bit address mode, if required */
- if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR) {
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 0);
- if (fsm->configuration & CFG_WRITE_EX_32BIT_ADDR_DELAY)
- udelay(1);
- }
return 0;
}
@@ -1736,7 +1701,7 @@ static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
while (len) {
/* Write up to page boundary */
- bytes = min(FLASH_PAGESIZE - page_offs, len);
+ bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len);
ret = stfsm_write(fsm, b, bytes, to);
if (ret)
@@ -1935,6 +1900,13 @@ static int stfsm_init(struct stfsm *fsm)
fsm->base + SPI_CONFIGDATA);
writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
+ /*
+ * Set the FSM 'WAIT' delay to the minimum workable value. Note, for
+ * our purposes, the WAIT instruction is used purely to achieve
+ * "sequence validity" rather than actually implement a delay.
+ */
+ writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME);
+
/* Clear FIFO, just in case */
stfsm_clear_fifo(fsm);
@@ -2086,7 +2058,7 @@ static int stfsm_remove(struct platform_device *pdev)
return mtd_device_unregister(&fsm->mtd);
}
-static struct of_device_id stfsm_match[] = {
+static const struct of_device_id stfsm_match[] = {
{ .compatible = "st,spi-fsm", },
{},
};
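Editorial note: constifying the match table is the standard treatment for data that is only read after registration. The general shape, with an invented compatible string:

        static const struct of_device_id demo_match[] = {
                { .compatible = "vendor,demo-ctrl" },   /* hypothetical */
                { /* sentinel */ },
        };
        MODULE_DEVICE_TABLE(of, demo_match);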
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
index 265f969817e3..3a19cbee24d7 100644
--- a/drivers/mtd/lpddr/Kconfig
+++ b/drivers/mtd/lpddr/Kconfig
@@ -1,5 +1,5 @@
-menu "LPDDR flash memory drivers"
- depends on MTD!=n
+menu "LPDDR & LPDDR2 PCM memory drivers"
+ depends on MTD
config MTD_LPDDR
tristate "Support for LPDDR flash chips"
@@ -17,4 +17,13 @@ config MTD_QINFO_PROBE
Window QINFO interface, permits software to be used for entire
families of devices. This serves a similar purpose to CFI on legacy
Flash products.
+
+config MTD_LPDDR2_NVM
+ # ARM dependency is only for writel_relaxed()
+ depends on MTD && ARM
+ tristate "Support for LPDDR2-NVM flash chips"
+ help
+ This option enables support for PCM memories with an LPDDR2-NVM
+ (Low-Power Double Data Rate 2) interface.
+
endmenu
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
index da48e46b5812..881d440d483e 100644
--- a/drivers/mtd/lpddr/Makefile
+++ b/drivers/mtd/lpddr/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
+obj-$(CONFIG_MTD_LPDDR2_NVM) += lpddr2_nvm.o
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
new file mode 100644
index 000000000000..063cec40d0ae
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -0,0 +1,507 @@
+/*
+ * LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock
+ * support for LPDDR2-NVM PCM memories
+ *
+ * Copyright © 2012 Micron Technology, Inc.
+ *
+ * Vincenzo Aliberti <vincenzo.aliberti@gmail.com>
+ * Domenico Manna <domenico.manna@gmail.com>
+ * Many thanks to Andrea Vigilante for initial enabling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+
+/* Parameters */
+#define ERASE_BLOCKSIZE (0x00020000/2) /* in Word */
+#define WRITE_BUFFSIZE (0x00000400/2) /* in Word */
+#define OW_BASE_ADDRESS 0x00000000 /* OW offset */
+#define BUS_WIDTH 0x00000020 /* x32 devices */
+
+/* PFOW symbols address offset */
+#define PFOW_QUERY_STRING_P (0x0000/2) /* in Word */
+#define PFOW_QUERY_STRING_F (0x0002/2) /* in Word */
+#define PFOW_QUERY_STRING_O (0x0004/2) /* in Word */
+#define PFOW_QUERY_STRING_W (0x0006/2) /* in Word */
+
+/* OW registers address */
+#define CMD_CODE_OFS (0x0080/2) /* in Word */
+#define CMD_DATA_OFS (0x0084/2) /* in Word */
+#define CMD_ADD_L_OFS (0x0088/2) /* in Word */
+#define CMD_ADD_H_OFS (0x008A/2) /* in Word */
+#define MPR_L_OFS (0x0090/2) /* in Word */
+#define MPR_H_OFS (0x0092/2) /* in Word */
+#define CMD_EXEC_OFS (0x00C0/2) /* in Word */
+#define STATUS_REG_OFS (0x00CC/2) /* in Word */
+#define PRG_BUFFER_OFS (0x0010/2) /* in Word */
+
+/* Datamask */
+#define MR_CFGMASK 0x8000
+#define SR_OK_DATAMASK 0x0080
+
+/* LPDDR2-NVM Commands */
+#define LPDDR2_NVM_LOCK 0x0061
+#define LPDDR2_NVM_UNLOCK 0x0062
+#define LPDDR2_NVM_SW_PROGRAM 0x0041
+#define LPDDR2_NVM_SW_OVERWRITE 0x0042
+#define LPDDR2_NVM_BUF_PROGRAM 0x00E9
+#define LPDDR2_NVM_BUF_OVERWRITE 0x00EA
+#define LPDDR2_NVM_ERASE 0x0020
+
+/* LPDDR2-NVM Registers offset */
+#define LPDDR2_MODE_REG_DATA 0x0040
+#define LPDDR2_MODE_REG_CFG 0x0050
+
+/*
+ * Internal Type Definitions
+ * pcm_int_data contains memory controller details:
+ * @ctl_regs : base address of the memory-controller registers, after remapping
+ * @bus_width: memory bus-width (e.g. x16: 2 bytes, x32: 4 bytes)
+ */
+struct pcm_int_data {
+ void __iomem *ctl_regs;
+ int bus_width;
+};
+
+static DEFINE_MUTEX(lpdd2_nvm_mutex);
+
+/*
+ * Build a map_word starting from an u_long
+ */
+static inline map_word build_map_word(u_long myword)
+{
+ map_word val = { {0} };
+ val.x[0] = myword;
+ return val;
+}
+
+/*
+ * Build Mode Register Configuration DataMask based on device bus-width
+ */
+static inline u_int build_mr_cfgmask(u_int bus_width)
+{
+ u_int val = MR_CFGMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = val << 16;
+
+ return val;
+}
+
+/*
+ * Build Status Register OK DataMask based on device bus-width
+ */
+static inline u_int build_sr_ok_datamask(u_int bus_width)
+{
+ u_int val = SR_OK_DATAMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = (val << 16) + val;
+
+ return val;
+}
+
+/*
+ * Evaluates Overlay Window Control Registers address
+ */
+static inline u_long ow_reg_add(struct map_info *map, u_long offset)
+{
+ u_long val = 0;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ val = map->pfow_base + offset * pcm_data->bus_width;
+
+ return val;
+}
+
+/*
+ * Enable lpddr2-nvm Overlay Window
+ * The Overlay Window is a memory-mapped area containing all LPDDR2-NVM
+ * registers used by device commands, as well as user-visible resources
+ * such as the Device Status Register, Device ID, etc.
+ */
+static inline void ow_enable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Disable lpddr2-nvm Overlay Window
+ * The Overlay Window is a memory-mapped area containing all LPDDR2-NVM
+ * registers used by device commands, as well as user-visible resources
+ * such as the Device Status Register, Device ID, etc.
+ */
+static inline void ow_disable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Execute lpddr2-nvm operations
+ */
+static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
+ u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf)
+{
+ map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} },
+ mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} },
+ exec_cmd = { {0} }, sr;
+ map_word data_h = { {0} }; /* only for 2x x16 devices stacked */
+ u_long i, status_reg, prg_buff_ofs;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width);
+
+ /* Builds low and high words for OW Control Registers */
+ add_l.x[0] = cmd_add & 0x0000FFFF;
+ add_h.x[0] = (cmd_add >> 16) & 0x0000FFFF;
+ mpr_l.x[0] = cmd_mpr & 0x0000FFFF;
+ mpr_h.x[0] = (cmd_mpr >> 16) & 0x0000FFFF;
+ cmd.x[0] = cmd_code & 0x0000FFFF;
+ exec_cmd.x[0] = 0x0001;
+ data_l.x[0] = cmd_data & 0x0000FFFF;
+ data_h.x[0] = (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */
+
+ /* Set Overlay Window Control Registers */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
+ map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
+ if (pcm_data->bus_width == 0x0004) { /* 2x16 devices stacked */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
+ map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
+ }
+
+ /* Fill Program Buffer */
+ if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) ||
+ (cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) {
+ prg_buff_ofs = (map_read(map,
+ ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
+ for (i = 0; i < cmd_mpr; i++) {
+ map_write(map, build_map_word(buf[i]), map->pfow_base +
+ prg_buff_ofs + i);
+ }
+ }
+
+ /* Command Execute */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
+ if (pcm_data->bus_width == 0x0004) /* 2x16 devices stacked */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
+
+ /* Status Register Check */
+ do {
+ sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
+ status_reg = sr.x[0];
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */
+ sr = map_read(map, ow_reg_add(map,
+ STATUS_REG_OFS) + 2);
+ status_reg += sr.x[0] << 16;
+ }
+ } while ((status_reg & sr_ok_datamask) != sr_ok_datamask);
+
+ return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO);
+}
+
+/*
+ * Execute lpddr2-nvm operations @ block level
+ */
+static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len, u_char block_op)
+{
+ struct map_info *map = mtd->priv;
+ u_long add, end_add;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ add = start_add;
+ end_add = add + len;
+
+ do {
+ ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
+ if (ret)
+ goto out;
+ add += mtd->erasesize;
+ } while (add < end_add);
+
+out:
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * verify presence of PFOW string
+ */
+static int lpddr2_nvm_pfow_present(struct map_info *map)
+{
+ map_word pfow_val[4];
+ unsigned int found = 1;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Load string from array */
+ pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
+ pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
+ pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
+ pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
+
+ /* Verify the string loaded vs expected */
+ if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
+ found = 0;
+
+ ow_disable(map);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+
+ return found;
+}
+
+/*
+ * lpddr2_nvm driver read method
+ */
+static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ *retlen = len;
+
+ map_copy_from(map, buf, start_add, *retlen);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return 0;
+}
+
+/*
+ * lpddr2_nvm driver write method
+ */
+static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_long add, current_len, tot_len, target_len, my_data;
+ u_char *write_buf = (u_char *)buf;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Set start value for the variables */
+ add = start_add;
+ target_len = len;
+ tot_len = 0;
+
+ while (tot_len < target_len) {
+ if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */
+ my_data = write_buf[tot_len];
+ my_data += (write_buf[tot_len+1]) << 8;
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */
+ my_data += (write_buf[tot_len+2]) << 16;
+ my_data += (write_buf[tot_len+3]) << 24;
+ }
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
+ my_data, add, 0x00, NULL);
+ if (ret)
+ goto out;
+
+ add += pcm_data->bus_width;
+ tot_len += pcm_data->bus_width;
+ } else { /* do buffer program */
+ current_len = min(target_len - tot_len,
+ (u_long) mtd->writesize);
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
+ 0x00, add, current_len, write_buf + tot_len);
+ if (ret)
+ goto out;
+
+ add += current_len;
+ tot_len += current_len;
+ }
+ }
+
+out:
+ *retlen = tot_len;
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver erase method
+ */
+static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int ret = lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len,
+ LPDDR2_NVM_ERASE);
+ if (!ret) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ }
+
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver unlock method
+ */
+static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK);
+}
+
+/*
+ * lpddr2_nvm driver lock method
+ */
+static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
+}
+
+/*
+ * lpddr2_nvm driver probe method
+ */
+static int lpddr2_nvm_probe(struct platform_device *pdev)
+{
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *add_range;
+ struct resource *control_regs;
+ struct pcm_int_data *pcm_data;
+
+ /* Allocate memory control_regs data structures */
+ pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL);
+ if (!pcm_data)
+ return -ENOMEM;
+
+ pcm_data->bus_width = BUS_WIDTH;
+
+ /* Allocate memory for map_info & mtd_info data structures */
+ map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ /* lpddr2_nvm address range */
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Populate map_info data structure */
+ *map = (struct map_info) {
+ .virt = devm_ioremap_resource(&pdev->dev, add_range),
+ .name = pdev->dev.init_name,
+ .phys = add_range->start,
+ .size = resource_size(add_range),
+ .bankwidth = pcm_data->bus_width / 2,
+ .pfow_base = OW_BASE_ADDRESS,
+ .fldrv_priv = pcm_data,
+ };
+ if (IS_ERR(map->virt))
+ return PTR_ERR(map->virt);
+
+ simple_map_init(map); /* fill with default methods */
+
+ control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
+ if (IS_ERR(pcm_data->ctl_regs))
+ return PTR_ERR(pcm_data->ctl_regs);
+
+ /* Populate mtd_info data structure */
+ *mtd = (struct mtd_info) {
+ .name = pdev->dev.init_name,
+ .type = MTD_RAM,
+ .priv = map,
+ .size = resource_size(add_range),
+ .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
+ .writesize = 1,
+ .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+ ._read = lpddr2_nvm_read,
+ ._write = lpddr2_nvm_write,
+ ._erase = lpddr2_nvm_erase,
+ ._unlock = lpddr2_nvm_unlock,
+ ._lock = lpddr2_nvm_lock,
+ };
+
+ /* Verify the presence of the device by looking for the PFOW string */
+ if (!lpddr2_nvm_pfow_present(map)) {
+ pr_err("device not recognized\n");
+ return -EINVAL;
+ }
+ /* Parse partitions and register the MTD device */
+ return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+}
+
+/*
+ * lpddr2_nvm driver remove method
+ */
+static int lpddr2_nvm_remove(struct platform_device *pdev)
+{
+ return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+}
+
+/* Initialize platform_driver data structure for lpddr2_nvm */
+static struct platform_driver lpddr2_nvm_drv = {
+ .driver = {
+ .name = "lpddr2_nvm",
+ },
+ .probe = lpddr2_nvm_probe,
+ .remove = lpddr2_nvm_remove,
+};
+
+module_platform_driver(lpddr2_nvm_drv);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincenzo Aliberti <vincenzo.aliberti@gmail.com>");
+MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories");
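Editorial note: the probe above pulls two IORESOURCE_MEM entries — index 0 is the LPDDR2-NVM address range, index 1 the memory-controller registers. A board-code sketch, with invented addresses, showing how a platform would hand those to the driver:

        #include <linux/ioport.h>
        #include <linux/platform_device.h>

        static struct resource lpddr2_nvm_res[] = {
                {       /* flash address range (index 0, hypothetical) */
                        .start = 0x80000000,
                        .end   = 0x83ffffff,
                        .flags = IORESOURCE_MEM,
                },
                {       /* memory-controller registers (index 1, hypothetical) */
                        .start = 0xfff00000,
                        .end   = 0xfff00fff,
                        .flags = IORESOURCE_MEM,
                },
        };

        static struct platform_device lpddr2_nvm_device = {
                .name           = "lpddr2_nvm",
                .id             = -1,
                .num_resources  = ARRAY_SIZE(lpddr2_nvm_res),
                .resource       = lpddr2_nvm_res,
        };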
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index fce23fe043f7..21b2874a303b 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -108,7 +108,7 @@ config MTD_SUN_UFLASH
config MTD_SC520CDP
tristate "CFI Flash device mapped on AMD SC520 CDP"
- depends on X86 && MTD_CFI
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
help
The SC520 CDP board has two banks of CFI-compliant chips and one
Dual-in-line JEDEC chip. This 'mapping' driver supports that
@@ -116,7 +116,7 @@ config MTD_SC520CDP
config MTD_NETSC520
tristate "CFI Flash device mapped on AMD NetSc520"
- depends on X86 && MTD_CFI
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
help
This enables access routines for the flash chips on the AMD NetSc520
demonstration board. If you have one of these boards and would like
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 8fead8e46bce..093edd51bdc7 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -183,7 +183,7 @@ static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
static void sc520cdp_setup_par(void)
{
- volatile unsigned long __iomem *mmcr;
+ unsigned long __iomem *mmcr;
unsigned long mmcr_val;
int i, j;
@@ -203,11 +203,11 @@ static void sc520cdp_setup_par(void)
*/
for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
- mmcr_val = mmcr[SC520_PAR(j)];
+ mmcr_val = readl(&mmcr[SC520_PAR(j)]);
/* if target device field matches, reprogram the PAR */
if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
{
- mmcr[SC520_PAR(j)] = par_table[i].new_par;
+ writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);
break;
}
}
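Editorial note: the sc520cdp change swaps 'volatile' pointer dereferences for the readl()/writel() accessors, which make the MMIO access rules explicit. The pattern in isolation — the register offset is invented:

        static u32 demo_rmw(void __iomem *regs, u32 set_bits)
        {
                u32 val = readl(regs + 0x10);   /* 0x10: hypothetical register */

                writel(val | set_bits, regs + 0x10);
                return val;
        }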
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 83a7a7091562..bb580bc16445 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -33,28 +33,6 @@ struct map_info soleng_flash_map = {
static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
-#ifdef CONFIG_MTD_SUPERH_RESERVE
-static struct mtd_partition superh_se_partitions[] = {
- /* Reserved for boot code, read-only */
- {
- .name = "flash_boot",
- .offset = 0x00000000,
- .size = CONFIG_MTD_SUPERH_RESERVE,
- .mask_flags = MTD_WRITEABLE,
- },
- /* All else is writable (e.g. JFFS) */
- {
- .name = "Flash FS",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
- }
-};
-#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
-#else
-#define superh_se_partitions NULL
-#define NUM_PARTITIONS 0
-#endif /* CONFIG_MTD_SUPERH_RESERVE */
-
static int __init init_soleng_maps(void)
{
/* First probe at offset 0 */
@@ -92,8 +70,7 @@ static int __init init_soleng_maps(void)
mtd_device_register(eprom_mtd, NULL, 0);
}
- mtd_device_parse_register(flash_mtd, probes, NULL,
- superh_se_partitions, NUM_PARTITIONS);
+ mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);
return 0;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 4dbfaee9aa95..43e30992a369 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -87,6 +87,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
+ if (req->cmd_flags & REQ_FLUSH)
+ return tr->flush(dev);
+
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
@@ -407,6 +410,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (!new->rq)
goto error3;
+ if (tr->flush)
+ blk_queue_flush(new->rq, REQ_FLUSH);
+
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
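Editorial note: a translation layer opts in to flush support simply by providing a .flush callback; the core then advertises REQ_FLUSH on the queue and routes flush requests to it. A sketch — the driver name and callback body are invented:

        static int demo_flush(struct mtd_blktrans_dev *dev)
        {
                /* write out any buffered sectors to the underlying MTD */
                return 0;
        }

        static struct mtd_blktrans_ops demo_tr = {
                .name   = "demo",
                .flush  = demo_flush,   /* non-NULL => REQ_FLUSH advertised */
                /* .readsect/.writesect etc. omitted from this sketch */
        };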
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 7d4e7b9da3a1..a0f54e80670c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -568,13 +568,18 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
{
struct mtd_write_req req;
struct mtd_oob_ops ops;
- void __user *usr_data, *usr_oob;
+ const void __user *usr_data, *usr_oob;
int ret;
- if (copy_from_user(&req, argp, sizeof(req)) ||
- !access_ok(VERIFY_READ, req.usr_data, req.len) ||
- !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
+ if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
+
+ usr_data = (const void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
+ if (!access_ok(VERIFY_READ, usr_data, req.len) ||
+ !access_ok(VERIFY_READ, usr_oob, req.ooblen))
+ return -EFAULT;
+
if (!mtd->_write_oob)
return -EOPNOTSUPP;
@@ -583,10 +588,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
ops.ooblen = (size_t)req.ooblen;
ops.ooboffs = 0;
- usr_data = (void __user *)(uintptr_t)req.usr_data;
- usr_oob = (void __user *)(uintptr_t)req.usr_oob;
-
- if (req.usr_data) {
+ if (usr_data) {
ops.datbuf = memdup_user(usr_data, ops.len);
if (IS_ERR(ops.datbuf))
return PTR_ERR(ops.datbuf);
@@ -594,7 +596,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
ops.datbuf = NULL;
}
- if (req.usr_oob) {
+ if (usr_oob) {
ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
if (IS_ERR(ops.oobbuf)) {
kfree(ops.datbuf);
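Editorial note: the mtdchar fix validates the user pointers only after converting them from the ioctl's __u64 fields. The conversion, isolated into a helper for illustration (the helper itself is invented):

        static const void __user *u64_to_user_ptr_demo(u64 val)
        {
                /* narrow via uintptr_t, then apply the __user annotation */
                return (const void __user *)(uintptr_t)val;
        }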
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index b7a24946ca26..722898aea7a6 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -679,9 +679,6 @@ static int bf5xx_nand_remove(struct platform_device *pdev)
peripheral_free_list(bfin_nfc_pin_req);
bf5xx_nand_dma_remove(info);
- /* free the common resources */
- kfree(info);
-
return 0;
}
@@ -742,10 +739,10 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
return -EFAULT;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
- goto out_err_kzalloc;
+ goto out_err;
}
platform_set_drvdata(pdev, info);
@@ -790,7 +787,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
/* initialise the hardware */
err = bf5xx_nand_hw_init(info);
if (err)
- goto out_err_hw_init;
+ goto out_err;
/* setup hardware ECC data struct */
if (hardware_ecc) {
@@ -827,9 +824,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
out_err_nand_scan:
bf5xx_nand_dma_remove(info);
-out_err_hw_init:
- kfree(info);
-out_err_kzalloc:
+out_err:
peripheral_free_list(bfin_nfc_pin_req);
return err;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index c07cd573ad3a..9f2012a3e764 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1233,7 +1233,7 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
return status;
}
-static void denali_erase(struct mtd_info *mtd, int page)
+static int denali_erase(struct mtd_info *mtd, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1250,8 +1250,7 @@ static void denali_erase(struct mtd_info *mtd, int page)
irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
INTR_STATUS__ERASE_FAIL);
- denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
- NAND_STATUS_FAIL : PASS;
+ return (irq_status & INTR_STATUS__ERASE_FAIL) ? NAND_STATUS_FAIL : PASS;
}
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
@@ -1584,7 +1583,7 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.ecc.write_page_raw = denali_write_page_raw;
denali->nand.ecc.read_oob = denali_read_oob;
denali->nand.ecc.write_oob = denali_write_oob;
- denali->nand.erase_cmd = denali_erase;
+ denali->nand.erase = denali_erase;
if (nand_scan_tail(&denali->mtd)) {
ret = -ENXIO;
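Editorial note: the erase hook now reports success or failure directly, instead of parking the result for a later waitfunc() call. Reduced to its shape — the IRQ bit and body are invented:

        static int demo_erase(struct mtd_info *mtd, int page)
        {
                u32 irq_status = 0;

                /* issue the ERASE command for 'page', wait for the controller
                 * interrupt, and fill irq_status from hardware */

                return (irq_status & 0x1) ? NAND_STATUS_FAIL : 0;
        }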
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 1b0265e85a06..ce24637e14f1 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -872,7 +872,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
return 0;
}
-static void docg4_erase_block(struct mtd_info *mtd, int page)
+static int docg4_erase_block(struct mtd_info *mtd, int page)
{
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
@@ -916,6 +916,8 @@ static void docg4_erase_block(struct mtd_info *mtd, int page)
write_nop(docptr);
poll_status(doc);
write_nop(docptr);
+
+ return nand->waitfunc(mtd, nand);
}
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
@@ -1236,7 +1238,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->block_markbad = docg4_block_markbad;
nand->read_buf = docg4_read_buf;
nand->write_buf = docg4_write_buf16;
- nand->erase_cmd = docg4_erase_block;
+ nand->erase = docg4_erase_block;
nand->ecc.read_page = docg4_read_page;
nand->ecc.write_page = docg4_write_page;
nand->ecc.read_page_raw = docg4_read_page_raw;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ec549cd9849f..545a5c002f09 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -723,6 +723,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *buf, int oob_required)
+{
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
{
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
@@ -761,6 +774,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->ecc.read_page = fsl_elbc_read_page;
chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index cb45d2f8e208..2338124dd05f 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -56,7 +56,7 @@ struct fsl_ifc_nand_ctrl {
struct nand_hw_control controller;
struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
- u8 __iomem *addr; /* Address of assigned IFC buffer */
+ void __iomem *addr; /* Address of assigned IFC buffer */
unsigned int page; /* Last page written to / read from */
unsigned int read_bytes;/* Number of bytes read during command */
unsigned int column; /* Saved column from SEQIN */
@@ -591,7 +591,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ if (chip->options & NAND_BUSWIDTH_16)
+ setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ else
+ setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
return;
case NAND_CMD_RESET:
@@ -636,7 +639,7 @@ static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
len = bufsize - ifc_nand_ctrl->index;
}
- memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len);
+ memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
ifc_nand_ctrl->index += len;
}
@@ -648,13 +651,16 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int offset;
/*
* If there are still bytes in the IFC buffer, then use the
* next byte.
*/
- if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes)
- return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]);
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ offset = ifc_nand_ctrl->index++;
+ return in_8(ifc_nand_ctrl->addr + offset);
+ }
dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
return ERR_BYTE;
@@ -675,8 +681,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
- data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl->
- addr[ifc_nand_ctrl->index]);
+ data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
ifc_nand_ctrl->index += 2;
return (uint8_t) data;
}
@@ -701,7 +706,7 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
avail = min((unsigned int)len,
ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
- memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail);
+ memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
ifc_nand_ctrl->index += avail;
if (len > avail)
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 588f5374047c..05bb91f2f4c4 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -54,7 +54,7 @@
#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
- (GPMI_IS_MX6Q(x) \
+ (GPMI_IS_MX6(x) \
? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
& MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
: (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
@@ -65,7 +65,7 @@
#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
- ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
& MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
: 0 \
@@ -77,7 +77,7 @@
#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
(0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
- (GPMI_IS_MX6Q(x) \
+ (GPMI_IS_MX6(x) \
? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
: ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
)
@@ -96,7 +96,7 @@
#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
- (GPMI_IS_MX6Q(x) \
+ (GPMI_IS_MX6(x) \
? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
& MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
: (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
@@ -107,7 +107,7 @@
#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
- ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
& MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
: 0 \
@@ -119,7 +119,7 @@
#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
(0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
- (GPMI_IS_MX6Q(x) \
+ (GPMI_IS_MX6(x) \
? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
)
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index dd1df605a1d6..87e658ce23ef 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -861,7 +861,7 @@ static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
struct resources *r = &this->resources;
unsigned long rate = clk_get_rate(r->clock[0]);
int mode = this->timing_mode;
- int dll_threshold = 16; /* in ns */
+ int dll_threshold = this->devdata->max_chain_delay;
unsigned long delay;
unsigned long clk_period;
int t_rea;
@@ -886,9 +886,6 @@ static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
/* [3] for GPMI_HW_GPMI_CTRL1 */
hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
- if (GPMI_IS_MX6Q(this))
- dll_threshold = 12;
-
/*
* Enlarge 10 times for the numerator and denominator in {3}.
* This make us to get more accurate result.
@@ -974,7 +971,7 @@ int gpmi_extra_init(struct gpmi_nand_data *this)
struct nand_chip *chip = &this->nand;
/* Enable the asynchronous EDO feature. */
- if (GPMI_IS_MX6Q(this) && chip->onfi_version) {
+ if (GPMI_IS_MX6(this) && chip->onfi_version) {
int mode = onfi_get_async_timing_mode(chip);
/* We only support the timing mode 4 and mode 5. */
@@ -1096,12 +1093,12 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
if (GPMI_IS_MX23(this)) {
mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
- } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+ } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
/*
* In the imx6, all the ready/busy pins are bound
* together. So we only need to check chip 0.
*/
- if (GPMI_IS_MX6Q(this))
+ if (GPMI_IS_MX6(this))
chip = 0;
/* MX28 shares the same R/B register as MX6Q. */
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index bb77f750e75a..f638cd8077ca 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -53,6 +53,30 @@ static struct nand_ecclayout gpmi_hw_ecclayout = {
.oobfree = { {.offset = 0, .length = 0} }
};
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+ .type = IS_MX23,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+ .type = IS_MX28,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+ .type = IS_MX6Q,
+ .bch_max_ecc_strength = 40,
+ .max_chain_delay = 12,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+ .type = IS_MX6SX,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12,
+};
+
static irqreturn_t bch_irq(int irq, void *cookie)
{
struct gpmi_nand_data *this = cookie;
@@ -102,14 +126,8 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
/* The mx23/mx28 only support the GF13. */
if (geo->gf_len == 14)
return false;
-
- if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
- return false;
- } else if (GPMI_IS_MX6Q(this)) {
- if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
- return false;
}
- return true;
+ return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
}
/*
@@ -270,8 +288,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
"We can not support this nand chip."
" Its required ecc strength(%d) is beyond our"
" capability(%d).\n", geo->ecc_strength,
- (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
- : MXS_ECC_STRENGTH_MAX));
+ this->devdata->bch_max_ecc_strength);
return -EINVAL;
}
@@ -572,7 +589,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
}
/* Get extra clocks */
- if (GPMI_IS_MX6Q(this))
+ if (GPMI_IS_MX6(this))
extra_clks = extra_clks_for_mx6q;
if (!extra_clks)
return 0;
@@ -590,9 +607,9 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
r->clock[i] = clk;
}
- if (GPMI_IS_MX6Q(this))
+ if (GPMI_IS_MX6(this))
/*
- * Set the default value for the gpmi clock in mx6q:
+ * Set the default value for the gpmi clock.
*
* If you want to use the ONFI nand which is in the
* Synchronous Mode, you should change the clock as you need.
@@ -1655,7 +1672,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
* (1) the chip is imx6, and
* (2) the size of the ECC parity is byte aligned.
*/
- if (GPMI_IS_MX6Q(this) &&
+ if (GPMI_IS_MX6(this) &&
((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
ecc->read_subpage = gpmi_ecc_read_subpage;
chip->options |= NAND_SUBPAGE_READ;
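
(As a quick check of that byte-alignment test, with illustrative numbers: a BCH
geometry with gf_len = 13 and ecc_strength = 8 stores 13 * 8 = 104 parity bits,
i.e. 13 whole bytes per chunk, so subpage reads are enabled; with
ecc_strength = 6 the 78 parity bits straddle a byte boundary and the option
stays off.)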
@@ -1711,7 +1728,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan_ident(mtd, GPMI_IS_MX6Q(this) ? 2 : 1, NULL);
+ ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
if (ret)
goto err_out;
@@ -1740,23 +1757,19 @@ err_out:
return ret;
}
-static const struct platform_device_id gpmi_ids[] = {
- { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
- { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
- { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
- {}
-};
-
static const struct of_device_id gpmi_nand_id_table[] = {
{
.compatible = "fsl,imx23-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX23],
+ .data = (void *)&gpmi_devdata_imx23,
}, {
.compatible = "fsl,imx28-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX28],
+ .data = (void *)&gpmi_devdata_imx28,
}, {
.compatible = "fsl,imx6q-gpmi-nand",
- .data = (void *)&gpmi_ids[IS_MX6Q],
+ .data = (void *)&gpmi_devdata_imx6q,
+ }, {
+ .compatible = "fsl,imx6sx-gpmi-nand",
+ .data = (void *)&gpmi_devdata_imx6sx,
}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -1767,18 +1780,18 @@ static int gpmi_nand_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
int ret;
+ this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
if (of_id) {
- pdev->id_entry = of_id->data;
+ this->devdata = of_id->data;
} else {
dev_err(&pdev->dev, "Failed to find the right device id.\n");
return -ENODEV;
}
- this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
- if (!this)
- return -ENOMEM;
-
platform_set_drvdata(pdev, this);
this->pdev = pdev;
this->dev = &pdev->dev;
@@ -1823,7 +1836,6 @@ static struct platform_driver gpmi_nand_driver = {
},
.probe = gpmi_nand_probe,
.remove = gpmi_nand_remove,
- .id_table = gpmi_ids,
};
module_platform_driver(gpmi_nand_driver);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 4c801fa18725..32c6ba49f986 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -119,11 +119,25 @@ struct nand_timing {
int8_t tRHOH_in_ns;
};
+enum gpmi_type {
+ IS_MX23,
+ IS_MX28,
+ IS_MX6Q,
+ IS_MX6SX
+};
+
+struct gpmi_devdata {
+ enum gpmi_type type;
+ int bch_max_ecc_strength;
+ int max_chain_delay; /* See the async EDO mode */
+};
+
struct gpmi_nand_data {
/* flags */
#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
#define GPMI_TIMING_INIT_OK (1 << 1)
int flags;
+ const struct gpmi_devdata *devdata;
/* System Interface */
struct device *dev;
@@ -281,15 +295,11 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
#define STATUS_ERASED 0xff
#define STATUS_UNCORRECTABLE 0xfe
-/* BCH's bit correction capability. */
-#define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */
-#define MX6_ECC_STRENGTH_MAX 40
-
-/* Use the platform_id to distinguish different Archs. */
-#define IS_MX23 0x0
-#define IS_MX28 0x1
-#define IS_MX6Q 0x2
-#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
-#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
-#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
+/* Use the devdata to distinguish the different architectures. */
+#define GPMI_IS_MX23(x) ((x)->devdata->type == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
+#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
+
+#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
#endif
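
With the per-SoC data held in struct gpmi_devdata, supporting a further SoC
becomes a data-only change rather than new platform-ID plumbing. A minimal
sketch, assuming a hypothetical "imx7d" part (name and values illustrative,
not part of this series):

	static const struct gpmi_devdata gpmi_devdata_imx7d = {
		.type			= IS_MX6SX,	/* or a new enum gpmi_type value */
		.bch_max_ecc_strength	= 62,		/* assumed per-SoC BCH limit */
		.max_chain_delay	= 12,		/* assumed async EDO chain delay */
	};

	/* ...plus one more entry in gpmi_nand_id_table: */
	{ .compatible = "fsl,imx7d-gpmi-nand", .data = (void *)&gpmi_devdata_imx7d, },

Every GPMI_IS_*() check then resolves through this->devdata->type, so no call
sites need auditing.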
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 9d01c4df838c..41167e9e991e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -37,6 +37,7 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -1204,8 +1205,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
* ecc.pos. Let's make sure that there are no gaps in ECC positions.
*/
for (i = 0; i < eccfrag_len - 1; i++) {
- if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
- eccpos[i + start_step * chip->ecc.bytes + 1]) {
+ if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
gaps = 1;
break;
}
@@ -1501,6 +1501,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
mtd->oobavail : mtd->oobsize;
uint8_t *bufpoi, *oob, *buf;
+ int use_bufpoi;
unsigned int max_bitflips = 0;
int retry_mode = 0;
bool ecc_fail = false;
@@ -1523,9 +1524,20 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
+ if (!aligned)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !virt_addr_valid(buf);
+ else
+ use_bufpoi = 0;
+
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = aligned ? buf : chip->buffers->databuf;
+ bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+
+ if (use_bufpoi && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
read_retry:
chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
@@ -1547,7 +1559,7 @@ read_retry:
ret = chip->ecc.read_page(mtd, chip, bufpoi,
oob_required, page);
if (ret < 0) {
- if (!aligned)
+ if (use_bufpoi)
/* Invalidate page cache */
chip->pagebuf = -1;
break;
@@ -1556,7 +1568,7 @@ read_retry:
max_bitflips = max_t(unsigned int, max_bitflips, ret);
/* Transfer not aligned data */
- if (!aligned) {
+ if (use_bufpoi) {
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - ecc_failures) &&
(ops->mode != MTD_OPS_RAW)) {
@@ -2376,11 +2388,23 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
+ int use_bufpoi;
+ int part_pagewr = (column || writelen < (mtd->writesize - 1));
+
+ if (part_pagewr)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !virt_addr_valid(buf);
+ else
+ use_bufpoi = 0;
- /* Partial page write? */
- if (unlikely(column || writelen < (mtd->writesize - 1))) {
+ /* Partial page write, or need to use bounce buffer? */
+ if (use_bufpoi) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
cached = 0;
- bytes = min_t(int, bytes - column, (int) writelen);
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
memset(chip->buffers->databuf, 0xff, mtd->writesize);
memcpy(&chip->buffers->databuf[column], buf, bytes);
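
The use_bufpoi logic above matters for controllers that DMA straight from the
caller's buffer: a vmalloc()'d buffer fails virt_addr_valid(), so the data is
staged through chip->buffers->databuf instead. A driver opts in from its init
path; a minimal sketch (the function name is illustrative):

	static void example_nand_init(struct nand_chip *chip)
	{
		/* Ask nand_base to bounce buffers the DMA engine cannot reach. */
		chip->options |= NAND_USE_BOUNCE_BUFFER;
	}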
@@ -2618,18 +2642,20 @@ out:
}
/**
- * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * single_erase - [GENERIC] NAND standard block erase command function
* @mtd: MTD device structure
* @page: the page address of the block which will be erased
*
- * Standard erase command for NAND chips.
+ * Standard erase command for NAND chips. Returns NAND status.
*/
-static void single_erase_cmd(struct mtd_info *mtd, int page)
+static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd->priv;
/* Send commands to erase a block */
chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ return chip->waitfunc(mtd, chip);
}
/**
@@ -2710,9 +2736,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
(page + pages_per_block))
chip->pagebuf = -1;
- chip->erase_cmd(mtd, page & chip->pagemask);
-
- status = chip->waitfunc(mtd, chip);
+ status = chip->erase(mtd, page & chip->pagemask);
/*
* See if operation failed and additional status checks are
@@ -3607,7 +3631,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
- /* Check is chip is ONFI compliant */
+ /* Check if the chip is ONFI compliant */
if (nand_flash_detect_onfi(mtd, chip, &busw))
goto ident_done;
@@ -3685,7 +3709,7 @@ ident_done:
}
chip->badblockbits = 8;
- chip->erase_cmd = single_erase_cmd;
+ chip->erase = single_erase;
/* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
@@ -3770,6 +3794,39 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
}
EXPORT_SYMBOL(nand_scan_ident);
+/*
+ * Check if the chip configuration meets the datasheet requirements.
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
+static bool nand_ecc_strength_good(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int corr, ds_corr;
+
+ if (ecc->size == 0 || chip->ecc_step_ds == 0)
+ /* Not enough information */
+ return true;
+
+ /*
+ * We get the number of corrected bits per page to compare
+ * the correction density.
+ */
+ corr = (mtd->writesize * ecc->strength) / ecc->size;
+ ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+
+ return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+}
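+
+/*
+ * Worked example with illustrative numbers (not from a specific chip): a
+ * 4096-byte page with ecc->strength = 4 and ecc->size = 512 corrects
+ * corr = 4096 * 4 / 512 = 32 bits per page. If the datasheet requires
+ * 4 bits per 512 bytes (ecc_strength_ds = 4, ecc_step_ds = 512), then
+ * ds_corr = 32 as well, and 4 >= 4, so the configuration passes. With
+ * ecc->strength = 1 at the same step size, both checks fail.
+ */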
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
@@ -3990,6 +4047,9 @@ int nand_scan_tail(struct mtd_info *mtd)
ecc->layout->oobavail += ecc->layout->oobfree[i].length;
mtd->oobavail = ecc->layout->oobavail;
+ /* ECC sanity check: warn noisily if it's too weak */
+ WARN_ON(!nand_ecc_strength_good(mtd));
+
/*
* Set the number of read / write steps for one page depending on ECC
* mode.
@@ -4023,8 +4083,16 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->pagebuf = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
- if ((ecc->mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
- chip->options |= NAND_SUBPAGE_READ;
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
/* Fill in remaining MTD driver data */
mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index c0615d1526f9..7f0c3b4c2a4f 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -528,7 +528,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
{
struct nand_chip *this = mtd->priv;
int i, chips;
- int bits, startblock, block, dir;
+ int startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
int bbtblocks;
int blocktopage = this->bbt_erase_shift - this->page_shift;
@@ -552,9 +552,6 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
bbtblocks = mtd->size >> this->bbt_erase_shift;
}
- /* Number of bits for each erase block in the bbt */
- bits = td->options & NAND_BBT_NRBITS_MSK;
-
for (i = 0; i < chips; i++) {
/* Reset version information */
td->version[i] = 0;
@@ -1285,6 +1282,7 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
int nand_default_bbt(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
+ int ret;
/* Is a flash based bad block table requested? */
if (this->bbt_options & NAND_BBT_USE_FLASH) {
@@ -1303,8 +1301,11 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_md = NULL;
}
- if (!this->badblock_pattern)
- nand_create_badblock_pattern(this);
+ if (!this->badblock_pattern) {
+ ret = nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
return nand_scan_bbt(mtd, this->badblock_pattern);
}
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 053c9a2d47c3..97c4c0216c90 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -506,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
return 1; /* error in ECC data; no action needed */
- pr_err("%s: uncorrectable ECC error", __func__);
+ pr_err("%s: uncorrectable ECC error\n", __func__);
return -1;
}
EXPORT_SYMBOL(__nand_correct_data);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 42e8a770e631..4f0d83648e5a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -575,12 +575,12 @@ static int alloc_device(struct nandsim *ns)
cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
if (IS_ERR(cfile))
return PTR_ERR(cfile);
- if (!cfile->f_op->read && !cfile->f_op->aio_read) {
+ if (!(cfile->f_mode & FMODE_CAN_READ)) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
goto err_close;
}
- if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+ if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
NS_ERR("alloc_device: cache file not writeable\n");
err = -EINVAL;
goto err_close;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1ff49b80bdaf..f0ed92e210a1 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -137,6 +137,10 @@
#define BADBLOCK_MARKER_LENGTH 2
#ifdef CONFIG_MTD_NAND_OMAP_BCH
+static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
+ 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
+ 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
+ 0x07, 0x0e};
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
@@ -1114,6 +1118,19 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
ecc_size1 = BCH_ECC_SIZE1;
}
break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ bch_type = 0x2;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = 0x01;
+ ecc_size0 = 52; /* ECC bits in nibbles per sector */
+ ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
+ } else {
+ wr_mode = 0x01;
+ ecc_size0 = 0; /* extra bits in nibbles per sector */
+ ecc_size1 = 52; /* OOB bits in nibbles per sector */
+ }
+ break;
default:
return;
}
@@ -1162,7 +1179,8 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
- int i;
+ u32 val;
+ int i, j;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
@@ -1201,6 +1219,41 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
default:
return -EINVAL;
}
@@ -1210,8 +1263,8 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back */
- for (i = 0; i < eccbytes; i++)
- ecc_calc[i] ^= bch4_polynomial[i];
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
@@ -1220,13 +1273,15 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back */
- for (i = 0; i < eccbytes; i++)
- ecc_calc[i] ^= bch8_polynomial[i];
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
default:
return -EINVAL;
}
@@ -1237,6 +1292,7 @@ static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
return 0;
}
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
/**
* erased_sector_bitflips - count bit flips
* @data: data sector buffer
@@ -1276,7 +1332,6 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,
return flip_bits;
}
-#ifdef CONFIG_MTD_NAND_OMAP_BCH
/**
* omap_elm_correct_data - corrects page data area in case error reported
* @mtd: MTD device structure
@@ -1318,6 +1373,10 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
actual_eccbytes = ecc->bytes - 1;
erased_ecc_vec = bch8_vector;
break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ actual_eccbytes = ecc->bytes;
+ erased_ecc_vec = bch16_vector;
+ break;
default:
pr_err("invalid driver configuration\n");
return -EINVAL;
@@ -1382,7 +1441,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
/* Check if any error reported */
if (!is_error_reported)
- return 0;
+ return stat;
/* Decode BCH error using ELM module */
elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
@@ -1401,6 +1460,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
BCH4_BIT_PAD;
break;
case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
pos = err_vec[i].error_loc[j];
break;
default:
@@ -1912,6 +1972,40 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
#endif
+ case OMAP_ECC_BCH16_CODE_HW:
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 26;
+ nand_chip->ecc.strength = 16;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* This ECC scheme requires ELM H/W block */
+ err = is_elm_present(info, pdata->elm_of_node, BCH16_ECC);
+ if (err < 0) {
+ pr_err("ELM is required for this ECC scheme\n");
+ goto return_error;
+ }
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
+#else
+ pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ err = -EINVAL;
+ goto return_error;
+#endif
default:
pr_err("nand: error: invalid or unsupported ECC scheme\n");
err = -EINVAL;
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index dd7fe817eafb..471b4df3a5ac 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -214,7 +214,7 @@ static int orion_nand_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id orion_nand_of_match_table[] = {
+static const struct of_device_id orion_nand_of_match_table[] = {
{ .compatible = "marvell,orion-nand", },
{},
};
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 7588fe2c127f..96b0b1d27df1 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -127,10 +127,10 @@
/* macros for registers read/write */
#define nand_writel(info, off, val) \
- __raw_writel((val), (info)->mmio_base + (off))
+ writel_relaxed((val), (info)->mmio_base + (off))
#define nand_readl(info, off) \
- __raw_readl((info)->mmio_base + (off))
+ readl_relaxed((info)->mmio_base + (off))
/* error code and state */
enum {
@@ -337,7 +337,7 @@ static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
-static struct of_device_id pxa3xx_nand_dt_ids[] = {
+static const struct of_device_id pxa3xx_nand_dt_ids[] = {
{
.compatible = "marvell,pxa3xx-nand",
.data = (void *)PXA3XX_NAND_VARIANT_PXA,
@@ -1354,7 +1354,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->mode = NAND_ECC_HW;
ecc->size = 512;
ecc->strength = 1;
- return 1;
} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
info->chunk_size = 512;
@@ -1363,7 +1362,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->mode = NAND_ECC_HW;
ecc->size = 512;
ecc->strength = 1;
- return 1;
/*
* Required ECC: 4-bit correction per 512 bytes
@@ -1378,7 +1376,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_2KB_bch4bit;
ecc->strength = 16;
- return 1;
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
@@ -1389,7 +1386,6 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_4KB_bch4bit;
ecc->strength = 16;
- return 1;
/*
* Required ECC: 8-bit correction per 512 bytes
@@ -1404,8 +1400,15 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_4KB_bch8bit;
ecc->strength = 16;
- return 1;
+ } else {
+ dev_err(&info->pdev->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ strength, page_size);
+ return -ENODEV;
}
+
+ dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
+ ecc->strength, ecc->size);
return 0;
}
@@ -1516,8 +1519,13 @@ KEEP_CONFIG:
}
}
- ecc_strength = chip->ecc_strength_ds;
- ecc_step = chip->ecc_step_ds;
+ if (pdata->ecc_strength && pdata->ecc_step_size) {
+ ecc_strength = pdata->ecc_strength;
+ ecc_step = pdata->ecc_step_size;
+ } else {
+ ecc_strength = chip->ecc_strength_ds;
+ ecc_step = chip->ecc_step_ds;
+ }
/* Set default ECC strength requirements on non-ONFI devices */
if (ecc_strength < 1 && ecc_step < 1) {
@@ -1527,12 +1535,8 @@ KEEP_CONFIG:
ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
ecc_step, mtd->writesize);
- if (!ret) {
- dev_err(&info->pdev->dev,
- "ECC strength %d at page size %d is not supported\n",
- ecc_strength, mtd->writesize);
- return -ENODEV;
- }
+ if (ret)
+ return ret;
/* calculate addressing information */
if (mtd->writesize >= 2048)
@@ -1730,6 +1734,14 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
of_property_read_u32(np, "num-cs", &pdata->num_cs);
pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+ pdata->ecc_strength = of_get_nand_ecc_strength(np);
+ if (pdata->ecc_strength < 0)
+ pdata->ecc_strength = 0;
+
+ pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
+ if (pdata->ecc_step_size < 0)
+ pdata->ecc_step_size = 0;
+
pdev->dev.platform_data = pdata;
return 0;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 325930db3f04..baea83f4dea8 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -245,7 +245,7 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
}
/* write DWORD chunks - faster */
- while (len) {
+ while (len >= 4) {
reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
r852_write_reg_dword(dev, R852_DATALINE, reg);
buf += 4;
@@ -254,8 +254,10 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
}
/* write rest */
- while (len)
+ while (len > 0) {
r852_write_reg(dev, R852_DATALINE, *buf++);
+ len--;
+ }
}
/*
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index b1a792fd1c23..efb819c3df2f 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -537,9 +537,9 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
return 0;
}
-static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);
+static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
-static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
+static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
@@ -605,7 +605,7 @@ static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
+static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
@@ -686,7 +686,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
goto normal;
}
- err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
+ err = s5pc110_dma_ops(dma_dst, dma_src,
count, S5PC110_DMA_DIR_READ);
if (page_dma)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 000000000000..f8acfa4310ef
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,17 @@
+menuconfig MTD_SPI_NOR
+ tristate "SPI-NOR device support"
+ depends on MTD
+ help
+ This is the framework for SPI NOR flash, which can be used by SPI
+ device drivers and by the SPI-NOR device driver.
+
+if MTD_SPI_NOR
+
+config SPI_FSL_QUADSPI
+ tristate "Freescale Quad SPI controller"
+ depends on ARCH_MXC
+ help
+ This enables support for the Quad SPI controller in master mode.
+ Currently, only SPI NOR flash is connected to this controller.
+
+endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
new file mode 100644
index 000000000000..6a7ce1462247
--- /dev/null
+++ b/drivers/mtd/spi-nor/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
new file mode 100644
index 000000000000..8d659a2888d5
--- /dev/null
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -0,0 +1,1009 @@
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+
+/* The registers */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_SHIFT 16
+#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
+#define QUADSPI_MCR_MDIS_SHIFT 14
+#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
+#define QUADSPI_MCR_CLR_TXF_SHIFT 11
+#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
+#define QUADSPI_MCR_CLR_RXF_SHIFT 10
+#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
+#define QUADSPI_MCR_DDR_EN_SHIFT 7
+#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
+#define QUADSPI_MCR_END_CFG_SHIFT 2
+#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
+#define QUADSPI_MCR_SWRSTHD_SHIFT 1
+#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
+#define QUADSPI_MCR_SWRSTSD_SHIFT 0
+#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID_SHIFT 24
+#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
+#define QUADSPI_BUF3CR_ALLMST (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
+#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
+#define QUADSPI_BFGENCR_SEQID_SHIFT 12
+#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_SHIFT 16
+#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
+#define QUADSPI_SMPR_FSDLY_SHIFT 6
+#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
+#define QUADSPI_SMPR_FSPHS_SHIFT 5
+#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
+#define QUADSPI_SMPR_HSENA_SHIFT 0
+#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
+
+#define QUADSPI_RBSR 0x10c
+#define QUADSPI_RBSR_RDBFL_SHIFT 8
+#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK 0x1F
+#define QUADSPI_RBCT_RXBRD_SHIFT 8
+#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
+
+#define QUADSPI_TBSR 0x150
+#define QUADSPI_TBDR 0x154
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_SHIFT 1
+#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
+#define QUADSPI_SR_AHB_ACC_SHIFT 2
+#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK 0x1
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR 0x200
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK 0x1
+#define QUADSPI_LCKER_UNLOCK 0x2
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE (0x1 << 0)
+
+#define QUADSPI_LUT_BASE 0x310
+
+/*
+ * The layout of a LUT register entry is shown below:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define OPRND0_SHIFT 0
+#define PAD0_SHIFT 8
+#define INSTR0_SHIFT 10
+#define OPRND1_SHIFT 16
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_READ 7
+#define LUT_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_READ_DDR 14
+#define LUT_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for the LUT register.
+ *
+ * The pad value selects how many of the IO[0:3] lines are used.
+ * For example, a Quad read needs four IO lines, so you should
+ * set LUT_PAD4, which means four IO lines are used.
+ */
+#define LUT_PAD1 0
+#define LUT_PAD2 1
+#define LUT_PAD4 2
+
+/* Operands for the LUT register. */
+#define ADDR24BIT 0x18
+#define ADDR32BIT 0x20
+
+/* Macros for constructing the LUT register. */
+#define LUT0(ins, pad, opr) \
+ (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
+ ((LUT_##ins) << INSTR0_SHIFT))
+
+#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
+
+/* other macros for LUT register. */
+#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
+#define QUADSPI_LUT_NUM 64
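+
+/*
+ * Worked example of the encoding (derived from the macros above): the Read
+ * Status sequence below programs LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
+ * LUT1(READ, PAD1, 0x1). With SPINOR_OP_RDSR = 0x05, the low half is
+ * 0x05 | (LUT_PAD1 << 8) | (LUT_CMD << 10) = 0x0405, and the high half is
+ * (0x1 | (LUT_READ << 10)) << 16 = 0x1C010000, giving one 32-bit LUT word,
+ * 0x1C010405: "send command 0x05 on one pad, then read one byte on one pad".
+ */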
+
+/* SEQID -- we can have 16 seqids at most. */
+#define SEQID_QUAD_READ 0
+#define SEQID_WREN 1
+#define SEQID_WRDI 2
+#define SEQID_RDSR 3
+#define SEQID_SE 4
+#define SEQID_CHIP_ERASE 5
+#define SEQID_PP 6
+#define SEQID_RDID 7
+#define SEQID_WRSR 8
+#define SEQID_RDCR 9
+#define SEQID_EN4B 10
+#define SEQID_BRWR 11
+
+enum fsl_qspi_devtype {
+ FSL_QUADSPI_VYBRID,
+ FSL_QUADSPI_IMX6SX,
+};
+
+struct fsl_qspi_devtype_data {
+ enum fsl_qspi_devtype devtype;
+ int rxfifo;
+ int txfifo;
+};
+
+static struct fsl_qspi_devtype_data vybrid_data = {
+ .devtype = FSL_QUADSPI_VYBRID,
+ .rxfifo = 128,
+ .txfifo = 64
+};
+
+static struct fsl_qspi_devtype_data imx6sx_data = {
+ .devtype = FSL_QUADSPI_IMX6SX,
+ .rxfifo = 128,
+ .txfifo = 512
+};
+
+#define FSL_QSPI_MAX_CHIP 4
+struct fsl_qspi {
+ struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
+ struct spi_nor nor[FSL_QSPI_MAX_CHIP];
+ void __iomem *iobase;
+ void __iomem *ahb_base; /* Used when read from AHB bus */
+ u32 memmap_phy;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ struct fsl_qspi_devtype_data *devtype_data;
+ u32 nor_size;
+ u32 nor_num;
+ u32 clk_rate;
+ unsigned int chip_base_addr; /* We may support two chips. */
+};
+
+static inline int is_vybrid_qspi(struct fsl_qspi *q)
+{
+ return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+}
+
+static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+{
+ return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+}
+
+/*
+ * An IC bug forces us to re-arrange the 32-bit data.
+ * Later chips, such as the IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return is_vybrid_qspi(q) ? __swab32(a) : a;
+}
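+
+/*
+ * For example, on the Vybrid a FIFO word of 0x11223344 comes back as
+ * 0x44332211 from __swab32(), while on the i.MX6SX it passes through
+ * unchanged.
+ */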
+
+static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
+{
+ writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
+{
+ writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = readl(q->iobase + QUADSPI_FR);
+ writel(reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
+ return IRQ_HANDLED;
+}
+
+static void fsl_qspi_init_lut(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int rxfifo = q->devtype_data->rxfifo;
+ u32 lut_base;
+ u8 cmd, addrlen, dummy;
+ int i;
+
+ fsl_qspi_unlock_lut(q);
+
+ /* Clear the entire LUT table */
+ for (i = 0; i < QUADSPI_LUT_NUM; i++)
+ writel(0, base + QUADSPI_LUT_BASE + i * 4);
+
+ /* Quad Read */
+ lut_base = SEQID_QUAD_READ * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_READ_1_1_4;
+ addrlen = ADDR24BIT;
+ dummy = 8;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_READ_1_1_4;
+ addrlen = ADDR32BIT;
+ dummy = 8;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
+ base + QUADSPI_LUT(lut_base + 1));
+
+ /* Write enable */
+ lut_base = SEQID_WREN * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base));
+
+ /* Page Program */
+ lut_base = SEQID_PP * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_PP;
+ addrlen = ADDR24BIT;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_PP;
+ addrlen = ADDR32BIT;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+
+ /* Read Status */
+ lut_base = SEQID_RDSR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase a sector */
+ lut_base = SEQID_SE * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_SE;
+ addrlen = ADDR24BIT;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_SE;
+ addrlen = ADDR32BIT;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase the whole chip */
+ lut_base = SEQID_CHIP_ERASE * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
+ base + QUADSPI_LUT(lut_base));
+
+ /* READ ID */
+ lut_base = SEQID_RDID * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write Register */
+ lut_base = SEQID_WRSR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Read Configuration Register */
+ lut_base = SEQID_RDCR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write disable */
+ lut_base = SEQID_WRDI * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Micron) */
+ lut_base = SEQID_EN4B * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Spansion) */
+ lut_base = SEQID_BRWR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base));
+
+ fsl_qspi_lock_lut(q);
+}
+
+/* Get the SEQID for the command */
+static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+{
+ switch (cmd) {
+ case SPINOR_OP_READ_1_1_4:
+ return SEQID_QUAD_READ;
+ case SPINOR_OP_WREN:
+ return SEQID_WREN;
+ case SPINOR_OP_WRDI:
+ return SEQID_WRDI;
+ case SPINOR_OP_RDSR:
+ return SEQID_RDSR;
+ case SPINOR_OP_SE:
+ return SEQID_SE;
+ case SPINOR_OP_CHIP_ERASE:
+ return SEQID_CHIP_ERASE;
+ case SPINOR_OP_PP:
+ return SEQID_PP;
+ case SPINOR_OP_RDID:
+ return SEQID_RDID;
+ case SPINOR_OP_WRSR:
+ return SEQID_WRSR;
+ case SPINOR_OP_RDCR:
+ return SEQID_RDCR;
+ case SPINOR_OP_EN4B:
+ return SEQID_EN4B;
+ case SPINOR_OP_BRWR:
+ return SEQID_BRWR;
+ default:
+ dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+ u32 reg, reg2;
+ int err;
+
+ init_completion(&q->c);
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
+ q->chip_base_addr, addr, len, cmd);
+
+ /* save the reg */
+ reg = readl(base + QUADSPI_MCR);
+
+ writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR);
+ writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
+ base + QUADSPI_RBCT);
+ writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
+
+ do {
+ reg2 = readl(base + QUADSPI_SR);
+ if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
+ udelay(1);
+ dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
+ continue;
+ }
+ break;
+ } while (1);
+
+ /* trigger the LUT now */
+ seqid = fsl_qspi_get_seqid(q, cmd);
+ writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ err = wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000));
+ if (!err) {
+ dev_err(q->dev,
+ "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
+ cmd, addr, readl(base + QUADSPI_FR),
+ readl(base + QUADSPI_SR));
+ err = -ETIMEDOUT;
+ } else {
+ err = 0;
+ }
+
+ /* restore the MCR */
+ writel(reg, base + QUADSPI_MCR);
+
+ return err;
+}
+
+/* Read out the data from the QUADSPI_RBDR buffer registers. */
+static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
+{
+ u32 tmp;
+ int i = 0;
+
+ while (len > 0) {
+ tmp = readl(q->iobase + QUADSPI_RBDR + i * 4);
+ tmp = fsl_qspi_endian_xchg(q, tmp);
+ dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
+ q->chip_base_addr, tmp);
+
+ if (len >= 4) {
+ *((u32 *)rxbuf) = tmp;
+ rxbuf += 4;
+ } else {
+ memcpy(rxbuf, &tmp, len);
+ break;
+ }
+
+ len -= 4;
+ i++;
+ }
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing,
+ * we need to invalidate the AHB buffer. If we do not do so, we may read out
+ * the wrong data. The spec tells us to reset the AHB domain and Serial Flash
+ * domain at the same time.
+ */
+static inline void fsl_qspi_invalid(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = readl(q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ writel(reg, q->iobase + QUADSPI_MCR);
+
+ /*
+ * The minimum delay : 1 AHB + 2 SFCK clocks.
+ * Delay 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ writel(reg, q->iobase + QUADSPI_MCR);
+}
+
+static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+ u8 opcode, unsigned int to, u32 *txbuf,
+ unsigned count, size_t *retlen)
+{
+ int ret, i, j;
+ u32 tmp;
+
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
+ q->chip_base_addr, to, count);
+
+ /* clear the TX FIFO. */
+ tmp = readl(q->iobase + QUADSPI_MCR);
+ writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
+
+ /* fill the TX data to the FIFO */
+ for (j = 0, i = ((count + 3) / 4); j < i; j++) {
+ tmp = fsl_qspi_endian_xchg(q, *txbuf);
+ writel(tmp, q->iobase + QUADSPI_TBDR);
+ txbuf++;
+ }
+
+ /* Trigger it */
+ ret = fsl_qspi_runcmd(q, opcode, to, count);
+
+ if (ret == 0 && retlen)
+ *retlen += count;
+
+ return ret;
+}
+
+static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
+{
+ int nor_size = q->nor_size;
+ void __iomem *base = q->iobase;
+
+ writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
+ writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
+ writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
+ writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
+}
+
+/*
+ * There are two different ways to read out the data from the flash:
+ * the "IP Command Read" and the "AHB Command Read".
+ *
+ * The IC designers suggest we use the "AHB Command Read", which is faster
+ * than the "IP Command Read". (What's more, there is a bug in
+ * the "IP Command Read" on the Vybrid.)
+ *
+ * After we set up the registers for the "AHB Command Read", we can use
+ * memcpy() to read the data directly. A "missed" access to the buffer
+ * causes the controller to clear the buffer, and use the sequence pointed
+ * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
+ */
+static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+
+ /* AHB configuration for access buffer 0/1/2. */
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
+ writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR);
+
+ /* We only use the buffer3 */
+ writel(0, base + QUADSPI_BUF0IND);
+ writel(0, base + QUADSPI_BUF1IND);
+ writel(0, base + QUADSPI_BUF2IND);
+
+ /* Set the default lut sequence for AHB Read. */
+ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+ writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ q->iobase + QUADSPI_BFGENCR);
+}
+
+/* We use this function to do some basic init for spi_nor_scan(). */
+static int fsl_qspi_nor_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg;
+ int ret;
+
+ /* The default frequency; we will change it in the future. */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table. */
+ fsl_qspi_init_lut(q);
+
+ /* Disable the module */
+ writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ reg = readl(base + QUADSPI_SMPR);
+ writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+ /* Enable the module */
+ writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* enable the interrupt */
+ writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
+{
+ unsigned long rate = q->clk_rate;
+ int ret;
+
+ if (is_imx6sx_qspi(q))
+ rate *= 4;
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table again. */
+ fsl_qspi_init_lut(q);
+
+ /* Init for AHB read */
+ fsl_qspi_init_abh_read(q);
+
+ return 0;
+}
+
+static struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+
+static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
+{
+ q->chip_base_addr = q->nor_size * (nor - q->nor);
+}
+
+static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+ struct fsl_qspi *q = nor->priv;
+
+ ret = fsl_qspi_runcmd(q, opcode, 0, len);
+ if (ret)
+ return ret;
+
+ fsl_qspi_read_data(q, len, buf);
+ return 0;
+}
+
+static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int write_enable)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ if (!buf) {
+ ret = fsl_qspi_runcmd(q, opcode, 0, 1);
+ if (ret)
+ return ret;
+
+ if (opcode == SPINOR_OP_CHIP_ERASE)
+ fsl_qspi_invalid(q);
+
+ } else if (len > 0) {
+ ret = fsl_qspi_nor_write(q, nor, opcode, 0,
+ (u32 *)buf, len, NULL);
+ } else {
+ dev_err(q->dev, "invalid cmd %d\n", opcode);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+ (u32 *)buf, len, retlen);
+
+ /* Invalidate the data in the AHB buffer. */
+ fsl_qspi_invalid(q);
+}
+
+static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+ u8 cmd = nor->read_opcode;
+ int ret;
+
+ dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
+ cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
+
+ /* Wait until the previous command is finished. */
+ ret = nor->wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* Read out the data directly from the AHB buffer. */
+ memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+
+ *retlen += len;
+ return 0;
+}
+
+static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
+ nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
+
+ /* Wait until the previous write command has finished. */
+ ret = nor->wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
+ if (ret)
+ return ret;
+
+ fsl_qspi_invalid(q);
+ return 0;
+}
+
+static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ ret = clk_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(q->clk);
+ if (ret) {
+ clk_disable(q->clk_en);
+ return ret;
+ }
+
+ fsl_qspi_set_base_addr(q, nor);
+ return 0;
+}
+
+static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ clk_disable(q->clk);
+ clk_disable(q->clk_en);
+}
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ struct device *dev = &pdev->dev;
+ struct fsl_qspi *q;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int ret, i = 0;
+ bool has_second_chip = false;
+ const struct of_device_id *of_id =
+ of_match_device(fsl_qspi_dt_ids, &pdev->dev);
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->nor_num = of_get_child_count(dev->of_node);
+ if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
+ return -ENODEV;
+
+ /* find the resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
+ q->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->iobase)) {
+ ret = PTR_ERR(q->iobase);
+ goto map_failed;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ q->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->ahb_base)) {
+ ret = PTR_ERR(q->ahb_base);
+ goto map_failed;
+ }
+ q->memmap_phy = res->start;
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en)) {
+ ret = PTR_ERR(q->clk_en);
+ goto map_failed;
+ }
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk)) {
+ ret = PTR_ERR(q->clk);
+ goto map_failed;
+ }
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret) {
+ dev_err(dev, "can not enable the qspi_en clock\n");
+ goto map_failed;
+ }
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ dev_err(dev, "can not enable the qspi clock\n");
+ goto map_failed;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq\n");
+ goto irq_failed;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq.\n");
+ goto irq_failed;
+ }
+
+ q->dev = dev;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+ platform_set_drvdata(pdev, q);
+
+ ret = fsl_qspi_nor_setup(q);
+ if (ret)
+ goto irq_failed;
+
+ if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
+ has_second_chip = true;
+
+ /* iterate the subnodes. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ const struct spi_device_id *id;
+ char modalias[40];
+
+ /* skip the holes */
+ if (!has_second_chip)
+ i *= 2;
+
+ nor = &q->nor[i];
+ mtd = &q->mtd[i];
+
+ nor->mtd = mtd;
+ nor->dev = dev;
+ nor->priv = q;
+ mtd->priv = nor;
+
+ /* fill the hooks */
+ nor->read_reg = fsl_qspi_read_reg;
+ nor->write_reg = fsl_qspi_write_reg;
+ nor->read = fsl_qspi_read;
+ nor->write = fsl_qspi_write;
+ nor->erase = fsl_qspi_erase;
+
+ nor->prepare = fsl_qspi_prep;
+ nor->unprepare = fsl_qspi_unprep;
+
+ if (of_modalias_node(np, modalias, sizeof(modalias)) < 0)
+ goto map_failed;
+
+ id = spi_nor_match_id(modalias);
+ if (!id)
+ goto map_failed;
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &q->clk_rate);
+ if (ret < 0)
+ goto map_failed;
+
+ /* set the chip address for READID */
+ fsl_qspi_set_base_addr(q, nor);
+
+ ret = spi_nor_scan(nor, id, SPI_NOR_QUAD);
+ if (ret)
+ goto map_failed;
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret)
+ goto map_failed;
+
+ /* Set the correct NOR size now. */
+ if (q->nor_size == 0) {
+ q->nor_size = mtd->size;
+
+ /* Map the SPI NOR to an accessible address */
+ fsl_qspi_set_map_addr(q);
+ }
+
+ /*
+ * The TX FIFO is 64 bytes in the Vybrid, but the Page Program
+ * may write up to 256 bytes at a time. The write works in
+ * units of the TX FIFO, not in units of the SPI NOR's page
+ * size.
+ *
+ * So shrink spi_nor->page_size if it is larger than the
+ * TX FIFO.
+ */
+ if (nor->page_size > q->devtype_data->txfifo)
+ nor->page_size = q->devtype_data->txfifo;
+
+ i++;
+ }
+
+ /* Finish the rest of the init. */
+ ret = fsl_qspi_nor_setup_last(q);
+ if (ret)
+ goto last_init_failed;
+
+ clk_disable(q->clk);
+ clk_disable(q->clk_en);
+ dev_info(dev, "QuadSPI SPI NOR flash driver\n");
+ return 0;
+
+last_init_failed:
+ for (i = 0; i < q->nor_num; i++)
+ mtd_device_unregister(&q->mtd[i]);
+
+irq_failed:
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+map_failed:
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < q->nor_num; i++)
+ mtd_device_unregister(&q->mtd[i]);
+
+ /* disable the hardware */
+ writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ writel(0x0, q->iobase + QUADSPI_RSER);
+
+ clk_unprepare(q->clk);
+ clk_unprepare(q->clk_en);
+ return 0;
+}
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_qspi_dt_ids,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
new file mode 100644
index 000000000000..c713c8656710
--- /dev/null
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -0,0 +1,1107 @@
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+/* Define max times to check status register before we give up. */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+
+#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
+/*
+ * Read the status register and return its value.
+ * Returns a negative value if an error occurred.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the configuration register and return its value.
+ * Returns a negative value if an error occurred.
+ */
+static int read_cr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Dummy cycle calculation for different types of read.
+ * It can be used to support more commands with
+ * different dummy cycle requirements.
+ */
+static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
+{
+ switch (nor->flash_read) {
+ case SPI_NOR_FAST:
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ return 1;
+ case SPI_NOR_NORMAL:
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Write one byte to the status register.
+ * Returns a negative value if an error occurred.
+ */
+static inline int write_sr(struct spi_nor *nor, u8 val)
+{
+ nor->cmd_buf[0] = val;
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+}
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+}
+
+/*
+ * Send the write disable instruction to the chip.
+ */
+static inline int write_disable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0);
+}
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+/* Enable/disable 4-byte addressing mode. */
+static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
+{
+ int status;
+ bool need_wren = false;
+ u8 cmd;
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_ST: /* Micron, actually */
+ /* Some Micron need WREN command; all will accept it */
+ need_wren = true;
+ case CFI_MFR_MACRONIX:
+ case 0xEF /* winbond */:
+ if (need_wren)
+ write_enable(nor);
+
+ cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+ status = nor->write_reg(nor, cmd, NULL, 0, 0);
+ if (need_wren)
+ write_disable(nor);
+
+ return status;
+ default:
+ /* Spansion style */
+ nor->cmd_buf[0] = enable << 7;
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
+ }
+}
+
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ unsigned long deadline;
+ int sr;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+
+ do {
+ cond_resched();
+
+ sr = read_sr(nor);
+ if (sr < 0)
+ break;
+ else if (!(sr & SR_WIP))
+ return 0;
+ } while (!time_after_eq(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Service routine to read the status register until ready, or a timeout occurs.
+ * Returns non-zero on error.
+ */
+static int wait_till_ready(struct spi_nor *nor)
+{
+ return nor->wait_till_ready(nor);
+}
+
+/*
+ * Erase the whole flash memory
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_chip(struct spi_nor *nor)
+{
+ int ret;
+
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
+
+ /* Wait until finished previous write command. */
+ ret = wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ write_enable(nor);
+
+ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
+}
+
+static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->prepare) {
+ ret = nor->prepare(nor, ops);
+ if (ret) {
+ dev_err(nor->dev, "failed in the preparation.\n");
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ if (nor->unprepare)
+ nor->unprepare(nor, ops);
+ mutex_unlock(&nor->lock);
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size) {
+ if (erase_chip(nor)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else {
+ while (len) {
+ if (nor->erase(nor, addr)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ }
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return ret;
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+ instr->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int ret = 0;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ if (ret)
+ return ret;
+
+ /* Wait until finished previous command */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+	ret = read_sr(nor);
+	if (ret < 0)
+		goto err;
+	status_old = ret;
+
+ if (offset < mtd->size - (mtd->size / 2))
+ status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 4))
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ else if (offset < mtd->size - (mtd->size / 8))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 16))
+ status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
+ else if (offset < mtd->size - (mtd->size / 32))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 64))
+ status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
+ else
+ status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
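+	/*
+	 * A sketch of the scheme above: these parts use BP2..BP0 to
+	 * protect a region anchored at the top of the chip, from 0b001
+	 * (top 1/64th) up to 0b111 (the whole array). The ladder picks
+	 * the smallest such region that still covers everything from
+	 * @ofs to the end of the chip.
+	 */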
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
+ (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ goto err;
+ }
+
+err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int ret = 0;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ /* Wait until finished previous command */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+	ret = read_sr(nor);
+	if (ret < 0)
+		goto err;
+	status_old = ret;
+
+ if (offset+len > mtd->size - (mtd->size / 64))
+ status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
+ else if (offset+len > mtd->size - (mtd->size / 32))
+ status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+ else if (offset+len > mtd->size - (mtd->size / 16))
+ status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
+ else if (offset+len > mtd->size - (mtd->size / 8))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset+len > mtd->size - (mtd->size / 4))
+ status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
+ else if (offset+len > mtd->size - (mtd->size / 2))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
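+	/*
+	 * Mirror of the ladder in spi_nor_lock(): pick the largest
+	 * top-anchored protected region that no longer overlaps
+	 * [ofs, ofs + len).
+	 */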
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
+ (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ goto err;
+ }
+
+err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ return ret;
+}
+
+struct flash_info {
+ /* JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ u32 jedec_id;
+ u16 ext_id;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u16 flags;
+#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
+#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
+#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
+};
+
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .jedec_id = (_jedec_id), \
+ .ext_id = (_ext_id), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags), \
+ })
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags), \
+ })
+
+/* NOTE: double check command sets and memory organization when you add
+ * more nor chips. This current list focuses on newer chips, which
+ * have been converging on command sets that include a JEDEC ID.
+ */
+const struct spi_device_id spi_nor_ids[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Everspin */
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+ /* GigaDevice */
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+
+ /* Macronix */
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
+
+ /* Micron */
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Spansion -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+
+ /* ST Microelectronics -- newer production may have feature updates */
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { },
+};
+EXPORT_SYMBOL_GPL(spi_nor_ids);
+
+static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+{
+ int tmp;
+ u8 id[5];
+ u32 jedec;
+ u16 ext_jedec;
+ struct flash_info *info;
+
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, 5);
+ if (tmp < 0) {
+ dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+ jedec = id[0];
+ jedec = jedec << 8;
+ jedec |= id[1];
+ jedec = jedec << 8;
+ jedec |= id[2];
+
+ ext_jedec = id[3] << 8 | id[4];
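+	/* id[0..2] form the 3-byte JEDEC id (manufacturer byte plus a
+	 * two-byte device id); id[3..4] carry the optional extended id
+	 * that tells apart e.g. the s25fl129p0/p1 variants below.
+	 */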
+
+ for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
+ info = (void *)spi_nor_ids[tmp].driver_data;
+ if (info->jedec_id == jedec) {
+ if (info->ext_id == 0 || info->ext_id == ext_jedec)
+ return &spi_nor_ids[tmp];
+ }
+ }
+ dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
+ return ERR_PTR(-ENODEV);
+}
+
+static const struct spi_device_id *jedec_probe(struct spi_nor *nor)
+{
+ return nor->read_id(nor);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+ if (ret)
+ return ret;
+
+ ret = nor->read(nor, from, len, retlen, buf);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ return ret;
+}
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ /* Wait until finished previous write command. */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+
+ write_enable(nor);
+
+ nor->sst_write_second = false;
+
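+	/*
+	 * SST AAI programming, as implemented below: byte-program a
+	 * leading odd byte (if any), stream aligned word pairs with
+	 * SPINOR_OP_AAI_WP, then byte-program any trailing byte. The
+	 * write_disable() further down terminates the AAI sequence.
+	 */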
+ actual = to % 2;
+ /* Start write from odd address. */
+ if (actual) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ nor->write(nor, to, 1, retlen, buf);
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ }
+ to += actual;
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ nor->write(nor, to, 2, retlen, buf + actual);
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ write_disable(nor);
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ write_enable(nor);
+
+ nor->program_opcode = SPINOR_OP_BP;
+ nor->write(nor, to, 1, retlen, buf + actual);
+
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ write_disable(nor);
+ }
+time_out:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * nor->page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 page_offset, page_size, i;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ /* Wait until finished previous write command. */
+ ret = wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+
+ write_enable(nor);
+
+ page_offset = to & (nor->page_size - 1);
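+	/* e.g. with the default 256-byte page, to = 0x1f0 gives
+	 * page_offset = 0xf0, so only 0x10 bytes fit before a fresh
+	 * page program must be issued.
+	 */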
+
+ /* do all the bytes fit onto one page? */
+ if (page_offset + len <= nor->page_size) {
+ nor->write(nor, to, len, retlen, buf);
+ } else {
+ /* the size of data remaining on the first page */
+ page_size = nor->page_size - page_offset;
+ nor->write(nor, to, page_size, retlen, buf);
+
+ /* write everything in nor->page_size chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > nor->page_size)
+ page_size = nor->page_size;
+
+			ret = wait_till_ready(nor);
+			if (ret)
+				goto write_err;
+ write_enable(nor);
+
+ nor->write(nor, to + i, page_size, retlen, buf + i);
+ }
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+	return ret;
+}
+
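+/*
+ * Quad reads require the quad-enable (QE) bit to be set first:
+ * Macronix parts keep it in the status register, while Spansion-style
+ * parts keep it in the configuration register, written as the second
+ * byte of WRSR (see write_sr_cr() below).
+ */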
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+	val = read_sr(nor);
+	if (val < 0)
+		return val;
+ write_enable(nor);
+
+ nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
+ nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+
+	ret = wait_till_ready(nor);
+	if (ret)
+		return ret;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Write status register and configuration register with 2 bytes.
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Return negative if an error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u16 val)
+{
+ nor->cmd_buf[0] = val & 0xff;
+ nor->cmd_buf[1] = (val >> 8);
+
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0);
+}
+
+static int spansion_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+ int quad_en = CR_QUAD_EN_SPAN << 8;
+
+ write_enable(nor);
+
+ ret = write_sr_cr(nor, quad_en);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ /* read back and check it */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
+{
+ int status;
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ status = macronix_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Macronix quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ default:
+ status = spansion_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Spansion quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ }
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev || !nor->read || !nor->write ||
+ !nor->read_reg || !nor->write_reg || !nor->erase) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ if (!nor->read_id)
+ nor->read_id = spi_nor_read_id;
+ if (!nor->wait_till_ready)
+ nor->wait_till_ready = spi_nor_wait_till_ready;
+
+ return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
+ enum read_mode mode)
+{
+ struct flash_info *info;
+ struct flash_platform_data *data;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = nor->mtd;
+ struct device_node *np = dev->of_node;
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Platform data helps sort out which chip type we have, as
+ * well as how this board partitions it. If we don't have
+ * a chip ID, try the JEDEC id commands; they'll work for most
+ * newer chips, even if we don't recognize the particular chip.
+ */
+ data = dev_get_platdata(dev);
+ if (data && data->type) {
+ const struct spi_device_id *plat_id;
+
+ for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) {
+ plat_id = &spi_nor_ids[i];
+ if (strcmp(data->type, plat_id->name))
+ continue;
+ break;
+ }
+
+ if (i < ARRAY_SIZE(spi_nor_ids) - 1)
+ id = plat_id;
+ else
+ dev_warn(dev, "unrecognized id %s\n", data->type);
+ }
+
+ info = (void *)id->driver_data;
+
+ if (info->jedec_id) {
+ const struct spi_device_id *jid;
+
+ jid = jedec_probe(nor);
+ if (IS_ERR(jid)) {
+ return PTR_ERR(jid);
+ } else if (jid != id) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(dev, "found %s, expected %s\n",
+ jid->name, id->name);
+ id = jid;
+ info = (void *)jid->driver_data;
+ }
+ }
+
+ mutex_init(&nor->lock);
+
+ /*
+ * Atmel, SST and Intel/Numonyx serial nor tend to power
+ * up with the software protection bits set
+ */
+
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ }
+
+ if (data && data->name)
+ mtd->name = data->name;
+ else
+ mtd->name = dev_name(dev);
+
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = info->sector_size * info->n_sectors;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+
+ /* nor protection support for STmicro chips */
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ }
+
+ /* sst nor chips use AAI word program */
+ if (info->flags & SST_WRITE)
+ mtd->_write = sst_write;
+ else
+ mtd->_write = spi_nor_write;
+
+ /* prefer "small sector" erase if possible */
+ if (info->flags & SECT_4K) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else {
+ nor->erase_opcode = SPINOR_OP_SE;
+ mtd->erasesize = info->sector_size;
+ }
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = info->page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (np) {
+ /* If we were instantiated by DT, use it */
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ nor->flash_read = SPI_NOR_FAST;
+ else
+ nor->flash_read = SPI_NOR_NORMAL;
+ } else {
+ /* If we weren't instantiated by DT, default to fast-read */
+ nor->flash_read = SPI_NOR_FAST;
+ }
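+	/* The DT opt-in is the boolean "m25p,fast-read" property on the
+	 * flash node; SPI_NOR_NO_FR below overrides it for parts that
+	 * cannot fast-read at all.
+	 */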
+
+ /* Some devices cannot do fast-read, no matter what DT tells us */
+ if (info->flags & SPI_NOR_NO_FR)
+ nor->flash_read = SPI_NOR_NORMAL;
+
+ /* Quad/Dual-read mode takes precedence over fast/normal */
+ if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
+ ret = set_quad_mode(nor, info->jedec_id);
+ if (ret) {
+ dev_err(dev, "quad mode not supported\n");
+ return ret;
+ }
+ nor->flash_read = SPI_NOR_QUAD;
+ } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
+ nor->flash_read = SPI_NOR_DUAL;
+ }
+
+ /* Default commands */
+ switch (nor->flash_read) {
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ_1_1_4;
+ break;
+ case SPI_NOR_DUAL:
+ nor->read_opcode = SPINOR_OP_READ_1_1_2;
+ break;
+ case SPI_NOR_FAST:
+ nor->read_opcode = SPINOR_OP_READ_FAST;
+ break;
+ case SPI_NOR_NORMAL:
+ nor->read_opcode = SPINOR_OP_READ;
+ break;
+ default:
+ dev_err(dev, "No Read opcode defined\n");
+ return -EINVAL;
+ }
+
+ nor->program_opcode = SPINOR_OP_PP;
+
+ if (info->addr_width)
+ nor->addr_width = info->addr_width;
+ else if (mtd->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
+ /* Dedicated 4-byte command set */
+ switch (nor->flash_read) {
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ4_1_1_4;
+ break;
+ case SPI_NOR_DUAL:
+ nor->read_opcode = SPINOR_OP_READ4_1_1_2;
+ break;
+ case SPI_NOR_FAST:
+ nor->read_opcode = SPINOR_OP_READ4_FAST;
+ break;
+ case SPI_NOR_NORMAL:
+ nor->read_opcode = SPINOR_OP_READ4;
+ break;
+ }
+ nor->program_opcode = SPINOR_OP_PP_4B;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE_4B;
+ mtd->erasesize = info->sector_size;
+ } else
+ set_4byte(nor, info->jedec_id, 1);
+ } else {
+ nor->addr_width = 3;
+ }
+
+ nor->read_dummy = spi_nor_read_dummy_cycles(nor);
+
+ dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+const struct spi_device_id *spi_nor_match_id(char *name)
+{
+ const struct spi_device_id *id = spi_nor_ids;
+
+ while (id->name[0]) {
+ if (!strcmp(name, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spi_nor_match_id);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 2e9e2d11f204..f19ab1acde1f 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -69,8 +69,8 @@ static int write_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -78,7 +78,7 @@ static int write_eraseblock(int ebnum)
ops.oobretlen = 0;
ops.ooboffs = use_offset;
ops.datbuf = NULL;
- ops.oobbuf = writebuf;
+ ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
pr_err("error: writeoob failed at %#llx\n",
@@ -122,8 +122,8 @@ static int verify_eraseblock(int ebnum)
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
- prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -139,7 +139,8 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf, writebuf, use_len)) {
+ if (memcmp(readbuf, writebuf + (use_len_max * i) + use_offset,
+ use_len)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -166,7 +167,9 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf + use_offset, writebuf, use_len)) {
+ if (memcmp(readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -566,8 +569,8 @@ static int __init mtd_oobtest_init(void)
if (bbt[i] || bbt[i + 1])
continue;
addr = (i + 1) * mtd->erasesize - mtd->writesize;
+ prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
for (pg = 0; pg < cnt; ++pg) {
- prandom_bytes_state(&rnd_state, writebuf, sz);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
@@ -575,7 +578,7 @@ static int __init mtd_oobtest_init(void)
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.oobbuf = writebuf;
+ ops.oobbuf = writebuf + pg * sz;
err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 20a667c95da4..8457df7ec5af 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -432,8 +432,10 @@ int ubiblock_create(struct ubi_volume_info *vi)
	 * Remember, workqueues are cheap, they're not threads.
*/
dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
- if (!dev->wq)
+ if (!dev->wq) {
+ ret = -ENOMEM;
goto out_free_queue;
+ }
INIT_WORK(&dev->work, ubiblock_do_work);
mutex_lock(&devices_mutex);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f54562a5998e..7646220ca6e2 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -731,7 +731,7 @@ static int rename_volumes(struct ubi_device *ubi,
goto out_free;
}
- re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
+ re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
ubi_err("cannot open volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index c5dad652614d..b04e7d059888 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,9 +125,9 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
- if (vol_id > av->vol_id)
+ if (vol_id < av->vol_id)
p = &(*p)->rb_left;
- else if (vol_id > av->vol_id)
+ else
p = &(*p)->rb_right;
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b667a51ed215..0dfeaf5da3f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -157,7 +157,7 @@ static inline struct aggregator *__get_first_agg(struct port *port)
rcu_read_lock();
first_slave = bond_first_slave_rcu(bond);
- agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+ agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
rcu_read_unlock();
return agg;
@@ -192,7 +192,7 @@ static inline void __enable_port(struct port *port)
{
struct slave *slave = port->slave;
- if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
+ if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
@@ -241,7 +241,7 @@ static inline int __check_agg_selection_timer(struct port *port)
*/
static inline void __get_state_machine_lock(struct port *port)
{
- spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+ spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
}
/**
@@ -250,7 +250,7 @@ static inline void __get_state_machine_lock(struct port *port)
*/
static inline void __release_state_machine_lock(struct port *port)
{
- spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+ spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
}
/**
@@ -350,7 +350,7 @@ static u8 __get_duplex(struct port *port)
static inline void __initialize_port_locks(struct slave *slave)
{
/* make sure it isn't called twice */
- spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
+ spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
}
/* Conversions */
@@ -688,8 +688,8 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
struct slave *slave;
bond_for_each_slave_rcu(bond, slave, iter)
- if (SLAVE_AD_INFO(slave).aggregator.is_active)
- return &(SLAVE_AD_INFO(slave).aggregator);
+ if (SLAVE_AD_INFO(slave)->aggregator.is_active)
+ return &(SLAVE_AD_INFO(slave)->aggregator);
return NULL;
}
@@ -1293,7 +1293,7 @@ static void ad_port_selection_logic(struct port *port)
}
/* search on all aggregators for a suitable aggregator for this port */
bond_for_each_slave(bond, slave, iter) {
- aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+ aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
/* keep a free aggregator for later use(if needed) */
if (!aggregator->lag_ports) {
@@ -1504,7 +1504,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best = (active && agg_device_up(active)) ? active : NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
- agg = &(SLAVE_AD_INFO(slave).aggregator);
+ agg = &(SLAVE_AD_INFO(slave)->aggregator);
agg->is_active = 0;
@@ -1549,7 +1549,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->slave ? best->slave->dev->name : "NULL");
bond_for_each_slave_rcu(bond, slave, iter) {
- agg = &(SLAVE_AD_INFO(slave).aggregator);
+ agg = &(SLAVE_AD_INFO(slave)->aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
agg->aggregator_identifier, agg->num_of_ports,
@@ -1840,16 +1840,16 @@ void bond_3ad_bind_slave(struct slave *slave)
struct aggregator *aggregator;
/* check that the slave has not been initialized yet. */
- if (SLAVE_AD_INFO(slave).port.slave != slave) {
+ if (SLAVE_AD_INFO(slave)->port.slave != slave) {
/* port initialization */
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
ad_initialize_port(port, bond->params.lacp_fast);
__initialize_port_locks(slave);
port->slave = slave;
- port->actor_port_number = SLAVE_AD_INFO(slave).id;
+ port->actor_port_number = SLAVE_AD_INFO(slave)->id;
/* key is determined according to the link speed, duplex and user key(which
* is yet not supported)
*/
@@ -1874,7 +1874,7 @@ void bond_3ad_bind_slave(struct slave *slave)
__disable_port(port);
/* aggregator initialization */
- aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+ aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
ad_initialize_agg(aggregator);
@@ -1903,8 +1903,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct slave *slave_iter;
struct list_head *iter;
- aggregator = &(SLAVE_AD_INFO(slave).aggregator);
- port = &(SLAVE_AD_INFO(slave).port);
+ aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -1932,7 +1932,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
(aggregator->lag_ports->next_port_in_aggregator)) {
/* find new aggregator for the related port(s) */
bond_for_each_slave(bond, slave_iter, iter) {
- new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+ new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
/* if the new aggregator is empty, or it is
* connected to our port only
*/
@@ -2010,7 +2010,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
/* find the aggregator that this port is connected to */
bond_for_each_slave(bond, slave_iter, iter) {
- temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+ temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
prev_port = NULL;
/* search the port in the aggregator's related ports */
for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2076,7 +2076,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (BOND_AD_INFO(bond).agg_select_timer &&
!(--BOND_AD_INFO(bond).agg_select_timer)) {
slave = bond_first_slave_rcu(bond);
- port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+ port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
/* select the active aggregator for the bond */
if (port) {
@@ -2094,7 +2094,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
/* for each port run the state machines */
bond_for_each_slave_rcu(bond, slave, iter) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
if (!port->slave) {
pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
bond->dev->name);
@@ -2155,7 +2155,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
if (length >= sizeof(struct lacpdu)) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
if (!port->slave) {
pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
@@ -2212,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
{
struct port *port;
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -2245,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
{
struct port *port;
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -2279,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
{
struct port *port;
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -2347,7 +2347,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
ret = 0;
goto out;
}
- active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
+ active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
@@ -2384,7 +2384,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct port *port;
bond_for_each_slave_rcu(bond, slave, iter) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
if (port->aggregator && port->aggregator->is_active) {
aggregator = port->aggregator;
break;
@@ -2440,22 +2440,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto err_free;
}
- slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
first_ok_slave = NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
- agg = SLAVE_AD_INFO(slave).port.aggregator;
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (!agg || agg->aggregator_identifier != agg_id)
continue;
if (slave_agg_no >= 0) {
- if (!first_ok_slave && SLAVE_IS_OK(slave))
+ if (!first_ok_slave && bond_slave_can_tx(slave))
first_ok_slave = slave;
slave_agg_no--;
continue;
}
- if (SLAVE_IS_OK(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
goto out;
}
@@ -2522,7 +2522,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
lacp_fast = bond->params.lacp_fast;
bond_for_each_slave(bond, slave, iter) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 93580a47cc54..76c0dade233f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -229,7 +229,7 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
/* Find the slave with the largest gap */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (SLAVE_IS_OK(slave)) {
+ if (bond_slave_can_tx(slave)) {
long long gap = compute_gap(slave);
if (max_gap < gap) {
@@ -384,7 +384,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
bool found = false;
bond_for_each_slave(bond, slave, iter) {
- if (!SLAVE_IS_OK(slave))
+ if (!bond_slave_can_tx(slave))
continue;
if (!found) {
if (!before || before->speed < slave->speed)
@@ -417,7 +417,7 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
bool found = false;
bond_for_each_slave_rcu(bond, slave, iter) {
- if (!SLAVE_IS_OK(slave))
+ if (!bond_slave_can_tx(slave))
continue;
if (!found) {
if (!before || before->speed < slave->speed)
@@ -755,7 +755,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
*/
- if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1039,11 +1039,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
struct bonding *bond = bond_get_bond_by_slave(slave);
struct net_device *upper;
struct list_head *iter;
+ struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
/* send untagged */
alb_send_lp_vid(slave, mac_addr, 0, 0);
- /* loop through vlans and send one packet for each */
+ /* loop through all devices and see if we need to send a packet
+ * for that device.
+ */
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
@@ -1059,6 +1062,16 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
vlan_dev_vlan_id(upper));
}
}
+
+ /* If this is a macvlan device, then only send updates
+ * when strict_match is turned off.
+ */
+ if (netif_is_macvlan(upper) && !strict_match) {
+ memset(tags, 0, sizeof(tags));
+ bond_verify_device_path(bond->dev, upper, tags);
+ alb_send_lp_vid(slave, upper->dev_addr,
+ tags[0].vlan_proto, tags[0].vlan_id);
+ }
}
rcu_read_unlock();
}
@@ -1068,7 +1081,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
struct net_device *dev = slave->dev;
struct sockaddr s_addr;
- if (slave->bond->params.mode == BOND_MODE_TLB) {
+ if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
memcpy(dev->dev_addr, addr, dev->addr_len);
return 0;
}
@@ -1111,13 +1124,13 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
struct slave *slave2)
{
- int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+ int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
struct slave *disabled_slave = NULL;
ASSERT_RTNL();
/* fasten the change in the switch */
- if (SLAVE_IS_OK(slave1)) {
+ if (bond_slave_can_tx(slave1)) {
alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
@@ -1129,7 +1142,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
disabled_slave = slave1;
}
- if (SLAVE_IS_OK(slave2)) {
+ if (bond_slave_can_tx(slave2)) {
alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
@@ -1358,6 +1371,77 @@ void bond_alb_deinitialize(struct bonding *bond)
rlb_deinitialize(bond);
}
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+ struct slave *tx_slave)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct ethhdr *eth_data = eth_hdr(skb);
+
+ if (!tx_slave) {
+ /* unbalanced or unassigned, send through primary */
+ tx_slave = rcu_dereference(bond->curr_active_slave);
+ if (bond->params.tlb_dynamic_lb)
+ bond_info->unbalanced_load += skb->len;
+ }
+
+ if (tx_slave && bond_slave_can_tx(tx_slave)) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
+ }
+
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
+ }
+
+ if (tx_slave && bond->params.tlb_dynamic_lb) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
+ }
+
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb_any(skb);
+out:
+ return NETDEV_TX_OK;
+}
+
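+/* TLB-mode transmit path: with tlb_dynamic_lb the hash indexes the
+ * TLB table as before; without it, flows are spread statically by
+ * taking the hash modulo the slave count.
+ */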
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct ethhdr *eth_data;
+ struct slave *tx_slave = NULL;
+ u32 hash_index;
+
+ skb_reset_mac_header(skb);
+ eth_data = eth_hdr(skb);
+
+ /* Do not TX balance any multicast or broadcast */
+ if (!is_multicast_ether_addr(eth_data->h_dest)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPX):
+		/* In case of IPX, it will fall back to L2 hash */
+ case htons(ETH_P_IPV6):
+ hash_index = bond_xmit_hash(bond, skb);
+ if (bond->params.tlb_dynamic_lb) {
+ tx_slave = tlb_choose_channel(bond,
+ hash_index & 0xFF,
+ skb->len);
+ } else {
+ struct list_head *iter;
+ int idx = hash_index % bond->slave_cnt;
+
+ bond_for_each_slave_rcu(bond, tx_slave, iter)
+ if (--idx < 0)
+ break;
+ }
+ break;
+ }
+ }
+ return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1366,7 +1450,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
int hash_size = 0;
- int do_tx_balance = 1;
+ bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
struct ipv6hdr *ip6hdr;
@@ -1381,7 +1465,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
(iph->daddr == ip_bcast) ||
(iph->protocol == IPPROTO_IGMP)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
@@ -1393,7 +1477,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* that here just in case.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1401,7 +1485,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* broadcasts in IPv4.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1411,7 +1495,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
*/
ip6hdr = ipv6_hdr(skb);
if (ipv6_addr_any(&ip6hdr->saddr)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1421,7 +1505,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
case ETH_P_IPX:
if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1430,7 +1514,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* this family since it has an "ARP" like
* mechanism
*/
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1438,12 +1522,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
- do_tx_balance = 0;
+ do_tx_balance = false;
if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
break;
default:
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1452,32 +1536,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
}
- if (!tx_slave) {
- /* unbalanced or unassigned, send through primary */
- tx_slave = rcu_dereference(bond->curr_active_slave);
- bond_info->unbalanced_load += skb->len;
- }
-
- if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- ether_addr_copy(eth_data->h_source,
- tx_slave->dev->dev_addr);
- }
-
- bond_dev_queue_xmit(bond, skb, tx_slave->dev);
- goto out;
- }
-
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
-
- /* no suitable interface, frame not sent */
- dev_kfree_skb_any(skb);
-out:
- return NETDEV_TX_OK;
+ return bond_do_alb_xmit(skb, bond, tx_slave);
}
void bond_alb_monitor(struct work_struct *work)
@@ -1514,8 +1573,10 @@ void bond_alb_monitor(struct work_struct *work)
/* If updating current_active, use all currently
	 * user mac addresses (!strict_match). Otherwise, only
* use mac of the slave device.
+ * In RLB mode, we always use strict matches.
*/
- strict_match = (slave != bond->curr_active_slave);
+ strict_match = (slave != bond->curr_active_slave ||
+ bond_info->rlb_enabled);
alb_send_learning_packets(slave, slave->dev->dev_addr,
strict_match);
}
@@ -1719,7 +1780,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* in TLB mode, the slave might flip down/up with the old dev_addr,
* and thus filter bond->dev_addr's packets, so force bond's mac
*/
- if (bond->params.mode == BOND_MODE_TLB) {
+ if (BOND_MODE(bond) == BOND_MODE_TLB) {
struct sockaddr sa;
u8 tmp_addr[ETH_ALEN];
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e09dd4bfafff..5fc76c01636c 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 2d3f7fa541ff..658e761c4568 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -23,7 +23,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
struct rlb_client_info *client_info;
u32 hash_index;
- if (bond->params.mode != BOND_MODE_ALB)
+ if (BOND_MODE(bond) != BOND_MODE_ALB)
return 0;
seq_printf(m, "SourceIP DestinationIP "
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d3a67896d435..3a451b6cd3d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -343,7 +343,7 @@ static int bond_set_carrier(struct bonding *bond)
if (!bond_has_slaves(bond))
goto down;
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
bond_for_each_slave(bond, slave, iter) {
@@ -497,7 +497,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
struct list_head *iter;
int err = 0;
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@ -523,7 +523,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
struct list_head *iter;
int err = 0;
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
err = dev_set_allmulti(bond->curr_active_slave->dev,
@@ -574,7 +574,7 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
@@ -585,8 +585,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
/*--------------------------- Active slave change ---------------------------*/
/* Update the hardware address list and promisc/allmulti for the new and
- * old active slaves (if any). Modes that are !USES_PRIMARY keep all
- * slaves up date at all times; only the USES_PRIMARY modes need to call
+ * old active slaves (if any). Modes that are not using primary keep all
+ * slaves up date at all times; only the modes that use primary need to call
* this function to swap these settings during a failover.
*/
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@ -747,7 +747,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP)
return slave;
- if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+ if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
slave->delay < mintime) {
mintime = slave->delay;
bestslave = slave;
@@ -801,7 +801,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
pr_info("%s: making interface %s the new active one %d ms earlier\n",
bond->dev->name, new_active->dev->name,
(bond->params.updelay - new_active->delay) * bond->params.miimon);
@@ -810,20 +810,20 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
new_active->delay = 0;
new_active->link = BOND_LINK_UP;
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
pr_info("%s: making interface %s the new active one\n",
bond->dev->name, new_active->dev->name);
}
}
}
- if (USES_PRIMARY(bond->params.mode))
+ if (bond_uses_primary(bond))
bond_hw_addr_swap(bond, new_active, old_active);
if (bond_is_lb(bond)) {
@@ -838,7 +838,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
bond_set_slave_inactive_flags(old_active,
BOND_SLAVE_NOTIFY_NOW);
@@ -876,8 +876,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* resend only if bond is brought up with the affected
* bonding modes and the retransmission is enabled */
if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
- ((USES_PRIMARY(bond->params.mode) && new_active) ||
- bond->params.mode == BOND_MODE_ROUNDROBIN)) {
+ ((bond_uses_primary(bond) && new_active) ||
+ BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
bond->igmp_retrans = bond->params.resend_igmp;
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
}
@@ -958,7 +958,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
struct slave *slave;
bond_for_each_slave(bond, slave, iter)
- if (IS_UP(slave->dev))
+ if (bond_slave_is_up(slave))
slave_disable_netpoll(slave);
}
@@ -1025,10 +1025,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
+#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
+
static void bond_compute_features(struct bonding *bond)
{
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+ netdev_features_t enc_features = BOND_ENC_FEATURES;
struct net_device *bond_dev = bond->dev;
struct list_head *iter;
struct slave *slave;
@@ -1038,11 +1042,15 @@ static void bond_compute_features(struct bonding *bond)
if (!bond_has_slaves(bond))
goto done;
+ vlan_features &= NETIF_F_ALL_FOR_ALL;
bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
+ enc_features = netdev_increment_features(enc_features,
+ slave->dev->hw_enc_features,
+ BOND_ENC_FEATURES);
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
@@ -1053,6 +1061,7 @@ static void bond_compute_features(struct bonding *bond)
done:
bond_dev->vlan_features = vlan_features;
+ bond_dev->hw_enc_features = enc_features;
bond_dev->hard_header_len = max_hard_header_len;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1084,7 +1093,7 @@ static bool bond_should_deliver_exact_match(struct sk_buff *skb,
struct bonding *bond)
{
if (bond_is_slave_inactive(slave)) {
- if (bond->params.mode == BOND_MODE_ALB &&
+ if (BOND_MODE(bond) == BOND_MODE_ALB &&
skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return false;
@@ -1126,7 +1135,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
skb->dev = bond->dev;
- if (bond->params.mode == BOND_MODE_ALB &&
+ if (BOND_MODE(bond) == BOND_MODE_ALB &&
bond->dev->priv_flags & IFF_BRIDGE_PORT &&
skb->pkt_type == PACKET_HOST) {
@@ -1163,6 +1172,35 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
}
+static struct slave *bond_alloc_slave(struct bonding *bond)
+{
+ struct slave *slave = NULL;
+
+ slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+ if (!slave)
+ return NULL;
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+ GFP_KERNEL);
+ if (!SLAVE_AD_INFO(slave)) {
+ kfree(slave);
+ return NULL;
+ }
+ }
+ return slave;
+}
+
+static void bond_free_slave(struct slave *slave)
+{
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ kfree(SLAVE_AD_INFO(slave));
+
+ kfree(slave);
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -1269,7 +1307,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond_has_slaves(bond)) {
pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
bond_dev->name);
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
bond_dev->name);
@@ -1290,11 +1328,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
- new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+ new_slave = bond_alloc_slave(bond);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
+
+ new_slave->bond = bond;
+ new_slave->dev = slave_dev;
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1317,7 +1358,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1338,8 +1379,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_restore_mac;
}
- new_slave->bond = bond;
- new_slave->dev = slave_dev;
slave_dev->priv_flags |= IFF_BONDING;
if (bond_is_lb(bond)) {
@@ -1351,10 +1390,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- /* If the mode USES_PRIMARY, then the following is handled by
+ /* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
- if (!USES_PRIMARY(bond->params.mode)) {
+ if (!bond_uses_primary(bond)) {
/* set promiscuity level to new slave */
if (bond_dev->flags & IFF_PROMISC) {
res = dev_set_promiscuity(slave_dev, 1);
@@ -1377,7 +1416,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
@@ -1450,7 +1489,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->link == BOND_LINK_DOWN ? "DOWN" :
(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
- if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+ if (bond_uses_primary(bond) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
bond->primary_slave = new_slave;
@@ -1458,7 +1497,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- switch (bond->params.mode) {
+ switch (BOND_MODE(bond)) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave,
BOND_SLAVE_NOTIFY_NOW);
@@ -1471,14 +1510,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
- SLAVE_AD_INFO(new_slave).id = 1;
+ SLAVE_AD_INFO(new_slave)->id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
- SLAVE_AD_INFO(new_slave).id =
- SLAVE_AD_INFO(prev_slave).id + 1;
+ SLAVE_AD_INFO(new_slave)->id =
+ SLAVE_AD_INFO(prev_slave)->id + 1;
}
bond_3ad_bind_slave(new_slave);
@@ -1539,7 +1578,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_compute_features(bond);
bond_set_carrier(bond);
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
@@ -1563,7 +1602,7 @@ err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
- if (!USES_PRIMARY(bond->params.mode))
+ if (!bond_uses_primary(bond))
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1585,7 +1624,7 @@ err_close:
err_restore_mac:
if (!bond->params.fail_over_mac ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1599,7 +1638,7 @@ err_restore_mtu:
dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free:
- kfree(new_slave);
+ bond_free_slave(new_slave);
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
@@ -1661,7 +1700,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
write_unlock_bh(&bond->lock);
@@ -1676,7 +1715,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
if (!all && (!bond->params.fail_over_mac ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
@@ -1748,10 +1787,10 @@ static int __bond_release_one(struct net_device *bond_dev,
/* must do this from outside any spinlocks */
vlan_vids_del_by_dev(slave_dev, bond_dev);
- /* If the mode USES_PRIMARY, then this cases was handled above by
+ /* If the mode uses primary, then this case was handled above by
* bond_change_active_slave(..., NULL)
*/
- if (!USES_PRIMARY(bond->params.mode)) {
+ if (!bond_uses_primary(bond)) {
/* unset promiscuity level from slave
* NOTE: The NETDEV_CHANGEADDR call above may change the value
* of the IFF_PROMISC flag in the bond_dev, but we need the
@@ -1775,7 +1814,7 @@ static int __bond_release_one(struct net_device *bond_dev,
dev_close(slave_dev);
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
@@ -1786,7 +1825,7 @@ static int __bond_release_one(struct net_device *bond_dev,
slave_dev->priv_flags &= ~IFF_BONDING;
- kfree(slave);
+ bond_free_slave(slave);
return 0; /* deletion OK */
}
@@ -1821,7 +1860,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
- info->bond_mode = bond->params.mode;
+ info->bond_mode = BOND_MODE(bond);
info->miimon = bond->params.miimon;
info->num_slaves = bond->slave_cnt;
@@ -1877,7 +1916,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (slave->delay) {
pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
bond->dev->name,
- (bond->params.mode ==
+ (BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
(bond_is_active_slave(slave) ?
"active " : "backup ") : "",
@@ -1968,10 +2007,10 @@ static void bond_miimon_commit(struct bonding *bond)
slave->link = BOND_LINK_UP;
slave->last_link_up = jiffies;
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* prevent it from being the active one */
bond_set_backup_slave(slave);
- } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
} else if (slave != bond->primary_slave) {
@@ -1985,7 +2024,7 @@ static void bond_miimon_commit(struct bonding *bond)
slave->duplex ? "full" : "half");
/* notify ad that the link status has changed */
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(slave, BOND_LINK_UP);
if (bond_is_lb(bond))
@@ -2004,15 +2043,15 @@ static void bond_miimon_commit(struct bonding *bond)
slave->link = BOND_LINK_DOWN;
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
- bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+ BOND_MODE(bond) == BOND_MODE_8023AD)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(slave,
BOND_LINK_DOWN);
@@ -2175,9 +2214,9 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
* When the path is validated, collect any vlan information in the
* path.
*/
-static bool bond_verify_device_path(struct net_device *start_dev,
- struct net_device *end_dev,
- struct bond_vlan_tag *tags)
+bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags)
{
struct net_device *upper;
struct list_head *iter;
@@ -2287,8 +2326,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
- !slave_do_arp_validate_only(bond, slave))
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+ !slave_do_arp_validate_only(bond))
slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
} else if (!is_arp) {
@@ -2456,7 +2495,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* do - all replies will be rx'ed on same link causing slaves
* to be unstable during low/no traffic periods
*/
- if (IS_UP(slave->dev))
+ if (bond_slave_is_up(slave))
bond_arp_send_all(bond, slave);
}
@@ -2678,10 +2717,10 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
bond_for_each_slave_rcu(bond, slave, iter) {
- if (!found && !before && IS_UP(slave->dev))
+ if (!found && !before && bond_slave_is_up(slave))
before = slave;
- if (found && !new_slave && IS_UP(slave->dev))
+ if (found && !new_slave && bond_slave_is_up(slave))
new_slave = slave;
/* if the link state is up at this point, we
* mark it down - this can happen if we have
@@ -2690,7 +2729,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
* one the current slave so it is still marked
* up when it is actually down
*/
- if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+ if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
slave->link = BOND_LINK_DOWN;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2853,7 +2892,7 @@ static int bond_slave_netdev_event(unsigned long event,
bond_update_speed_duplex(slave);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
if (old_speed != slave->speed)
bond_3ad_adapter_speed_changed(slave);
if (old_duplex != slave->duplex)
@@ -2881,7 +2920,7 @@ static int bond_slave_netdev_event(unsigned long event,
break;
case NETDEV_CHANGENAME:
/* we don't care if we don't have primary set */
- if (!USES_PRIMARY(bond->params.mode) ||
+ if (!bond_uses_primary(bond) ||
!bond->params.primary[0])
break;
@@ -3011,20 +3050,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
* @skb: buffer to use for headers
- * @count: modulo value
*
* This function will extract the necessary headers from the skb buffer and use
* them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
*/
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
struct flow_keys flow;
u32 hash;
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
- return bond_eth_hash(skb) % count;
+ return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3035,7 +3072,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash % count;
+ return hash;
}
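With the modulo folded out of bond_xmit_hash(), every caller now reduces the full 32-bit hash itself, as the xor transmit path later in this patch does. A minimal usage sketch:

	u32 slave_id = bond_xmit_hash(bond, skb) % bond->slave_cnt;

	bond_xmit_slave_id(bond, skb, slave_id);

This keeps the hash function reusable by callers that want the unreduced value (e.g. a table lookup) rather than a slave index.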
/*-------------------------- Device entry points ----------------------------*/
@@ -3046,7 +3083,7 @@ static void bond_work_init_all(struct bonding *bond)
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
else
INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@ -3073,7 +3110,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, iter) {
- if (USES_PRIMARY(bond->params.mode)
+ if (bond_uses_primary(bond)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
@@ -3092,9 +3129,10 @@ static int bond_open(struct net_device *bond_dev)
/* bond_alb_initialize must be called before the timer
* is started.
*/
- if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+ if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
return -ENOMEM;
- queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ if (bond->params.tlb_dynamic_lb)
+ queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
if (bond->params.miimon) /* link check interval, in milliseconds. */
@@ -3105,7 +3143,7 @@ static int bond_open(struct net_device *bond_dev)
bond->recv_probe = bond_arp_rcv;
}
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
queue_delayed_work(bond->wq, &bond->ad_work, 0);
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3310,7 +3348,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
rcu_read_lock();
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
@@ -3464,7 +3502,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
struct list_head *iter;
int res = 0;
- if (bond->params.mode == BOND_MODE_ALB)
+ if (BOND_MODE(bond) == BOND_MODE_ALB)
return bond_alb_set_mac_address(bond_dev, addr);
@@ -3475,7 +3513,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* Returning an error causes ifenslave to fail.
*/
if (bond->params.fail_over_mac &&
- bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
@@ -3555,7 +3593,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
/* Here we start from the slave with slave_id */
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
- if (slave_can_tx(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return;
}
@@ -3567,7 +3605,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
- if (slave_can_tx(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return;
}
@@ -3624,7 +3662,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
*/
if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
slave = rcu_dereference(bond->curr_active_slave);
- if (slave && slave_can_tx(slave))
+ if (slave && bond_slave_can_tx(slave))
bond_dev_queue_xmit(bond, skb, slave->dev);
else
bond_xmit_slave_id(bond, skb, 0);
@@ -3662,7 +3700,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
return NETDEV_TX_OK;
}
@@ -3677,7 +3715,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
bond_for_each_slave_rcu(bond, slave, iter) {
if (bond_is_last_slave(bond, slave))
break;
- if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+ if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
@@ -3689,7 +3727,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
bond_dev_queue_xmit(bond, skb2, slave->dev);
}
}
- if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+ if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
dev_kfree_skb_any(skb);
@@ -3714,7 +3752,7 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->queue_id == skb->queue_mapping) {
- if (slave_can_tx(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return 0;
}
@@ -3755,12 +3793,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
{
struct bonding *bond = netdev_priv(dev);
- if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
- if (!bond_slave_override(bond, skb))
- return NETDEV_TX_OK;
- }
+ if (bond_should_override_tx_queue(bond) &&
+ !bond_slave_override(bond, skb))
+ return NETDEV_TX_OK;
- switch (bond->params.mode) {
+ switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
@@ -3772,12 +3809,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_8023AD:
return bond_3ad_xmit_xor(skb, dev);
case BOND_MODE_ALB:
- case BOND_MODE_TLB:
return bond_alb_xmit(skb, dev);
+ case BOND_MODE_TLB:
+ return bond_tlb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
pr_err("%s: Error: Unknown bonding mode %d\n",
- dev->name, bond->params.mode);
+ dev->name, BOND_MODE(bond));
WARN_ON_ONCE(1);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -3817,14 +3855,14 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->port = PORT_OTHER;
- /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
+ /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
* do not need to check mode. Though link speed might not represent
* the true receive or transmit bandwidth (not all modes are symmetric)
* this is an accurate maximum.
*/
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, iter) {
- if (SLAVE_IS_OK(slave)) {
+ if (bond_slave_can_tx(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@ -3915,7 +3953,7 @@ void bond_setup(struct net_device *bond_dev)
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
- bond_dev->priv_flags |= IFF_BONDING;
+ bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* At first, we block adding VLANs. That's the only way to
@@ -3945,6 +3983,7 @@ void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
+ bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
bond_dev->features |= bond_dev->hw_features;
}
@@ -3994,7 +4033,8 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
- (bond_mode != BOND_MODE_8023AD)) {
+ (bond_mode != BOND_MODE_8023AD) &&
+ (bond_mode != BOND_MODE_TLB)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
@@ -4079,7 +4119,7 @@ static int bond_check_params(struct bond_params *params)
}
/* reset values for 802.3ad/TLB/ALB */
- if (BOND_NO_USES_ARP(bond_mode)) {
+ if (!bond_mode_uses_arp(bond_mode)) {
if (!miimon) {
pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warn("Forcing miimon to 100msec\n");
@@ -4161,7 +4201,7 @@ static int bond_check_params(struct bond_params *params)
catch mistakes */
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
- IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
+ !bond_is_ip_target_ok(ip)) {
pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[i]);
arp_interval = 0;
@@ -4234,7 +4274,7 @@ static int bond_check_params(struct bond_params *params)
pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
- if (primary && !USES_PRIMARY(bond_mode)) {
+ if (primary && !bond_mode_uses_primary(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
@@ -4300,6 +4340,7 @@ static int bond_check_params(struct bond_params *params)
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
+ params->tlb_dynamic_lb = 1; /* Default value */
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f847e165d252..5ab3c1847e67 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -56,10 +56,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
- if (slave->bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
- agg = SLAVE_AD_INFO(slave).port.aggregator;
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
agg->aggregator_identifier))
@@ -407,7 +407,7 @@ static int bond_fill_info(struct sk_buff *skb,
unsigned int packets_per_slave;
int i, targets_added;
- if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+ if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
goto nla_put_failure;
if (slave_dev &&
@@ -505,7 +505,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.ad_select))
goto nla_put_failure;
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
if (!bond_3ad_get_active_agg_info(bond, &info)) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 832070298446..540e0167bf24 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -180,6 +182,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -200,7 +208,7 @@ static const struct bond_option bond_opts[] = {
[BOND_OPT_XMIT_HASH] = {
.id = BOND_OPT_XMIT_HASH,
.name = "xmit_hash_policy",
- .desc = "balance-xor and 802.3ad hashing method",
+ .desc = "balance-xor, 802.3ad, and tlb hashing method",
.values = bond_xmit_hashtype_tbl,
.set = bond_option_xmit_hash_policy_set
},
@@ -365,9 +373,33 @@ static const struct bond_option bond_opts[] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_slaves_set
},
+ [BOND_OPT_TLB_DYNAMIC_LB] = {
+ .id = BOND_OPT_TLB_DYNAMIC_LB,
+ .name = "tlb_dynamic_lb",
+ .desc = "Enable dynamic flow shuffling",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+ .values = bond_tlb_dynamic_lb_tbl,
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .set = bond_option_tlb_dynamic_lb_set,
+ },
{ }
};
+/* Searches for an option by name */
+const struct bond_option *bond_opt_get_by_name(const char *name)
+{
+ const struct bond_option *opt;
+ int option;
+
+ for (option = 0; option < BOND_OPT_LAST; option++) {
+ opt = bond_opt_get(option);
+ if (opt && !strcmp(opt->name, name))
+ return opt;
+ }
+
+ return NULL;
+}
+
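A usage sketch for the new by-name lookup (option name taken from this patch; error handling elided), routing a value through the regular option setter:

	const struct bond_option *opt = bond_opt_get_by_name("tlb_dynamic_lb");

	if (opt)
		bond_opt_tryset_rtnl(bond, opt->id, (char *)"off");

The generic sysfs store in bond_sysfs.c below is the real consumer: it resolves attr->attr.name the same way.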
/* Searches for a value in opt's values[] table */
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
@@ -641,7 +673,7 @@ const struct bond_option *bond_opt_get(unsigned int option)
int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
{
- if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
+ if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
bond->dev->name, newval->string);
/* disable arp monitoring */
@@ -662,7 +694,7 @@ int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newv
static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
struct slave *slave)
{
- return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+ return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@ -727,7 +759,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
bond->dev->name, new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
+ bond_slave_is_up(new_active)) {
pr_info("%s: Setting %s as active slave\n",
bond->dev->name, new_active->dev->name);
bond_change_active_slave(bond, new_active);
@@ -746,6 +778,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
return ret;
}
+/* There are two tricky bits here. First, if MII monitoring is activated, then
+ * we must disable ARP monitoring. Second, if the timer isn't running, we must
+ * start it.
+ */
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -784,6 +820,10 @@ static int bond_option_miimon_set(struct bonding *bond,
return 0;
}
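A condensed sketch of the two interlocks just described (illustrative only; the real setter also has to cancel or queue the work items and recompute the delay multipliers):

	if (newval->value && bond->params.arp_interval) {
		/* MII and ARP monitoring are mutually exclusive */
		bond->params.arp_interval = 0;
	}
	if (newval->value && (bond->dev->flags & IFF_UP))
		/* (re)start the MII timer if the device is running */
		queue_delayed_work(bond->wq, &bond->mii_work, 0);

The arp_interval setter further down applies the mirror-image rule, disabling miimon when ARP monitoring is switched on.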
+/* Set the up and down delays. These must be multiples of the
+ * MII monitoring value, and are stored internally as the multiplier.
+ * Thus, we must translate to ms for the real world.
+ */
static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
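A worked example with assumed values: miimon = 100 and a requested updelay of 250 ms. The value is rounded down to a multiple of miimon and only the multiplier is stored:

	/* requested 250 ms with miimon = 100 ms */
	params->updelay = 250 / 100;	/* stored as 2 */
	/* sysfs reports params->updelay * params->miimon = 200 ms */

which matches the show functions below that print bond->params.updelay * bond->params.miimon.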
@@ -842,6 +882,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
return 0;
}
+/* There are two tricky bits here. First, if ARP monitoring is activated, then
+ * we must disable MII monitoring. Second, if the ARP timer isn't running,
+ * we must start it.
+ */
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -899,7 +943,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
__be32 *targets = bond->params.arp_targets;
int ind;
- if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ if (!bond_is_ip_target_ok(target)) {
pr_err("%s: invalid ARP target %pI4 specified for addition\n",
bond->dev->name, &target);
return -EINVAL;
@@ -944,7 +988,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
unsigned long *targets_rx;
int ind, i;
- if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ if (!bond_is_ip_target_ok(target)) {
pr_err("%s: invalid ARP target %pI4 specified for removal\n",
bond->dev->name, &target);
return -EINVAL;
@@ -1338,3 +1382,13 @@ err_no_cmd:
ret = -EPERM;
goto out;
}
+
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.tlb_dynamic_lb = newval->value;
+
+ return 0;
+}
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 12be9e1bfb0c..17ded5b29176 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -62,6 +62,7 @@ enum {
BOND_OPT_RESEND_IGMP,
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
+ BOND_OPT_TLB_DYNAMIC_LB,
BOND_OPT_LAST
};
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
struct bond_opt_value *val);
const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
/* This helper is used to initialize a bond_opt_value structure for parameter
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 013fdd0f45e9..b215b479bb3a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -72,9 +72,9 @@ static void bond_info_show_master(struct seq_file *seq)
curr = rcu_dereference(bond->curr_active_slave);
seq_printf(seq, "Bonding Mode: %s",
- bond_mode_name(bond->params.mode));
+ bond_mode_name(BOND_MODE(bond)));
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac) {
optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
@@ -83,15 +83,15 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "\n");
- if (bond->params.mode == BOND_MODE_XOR ||
- bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_XOR ||
+ BOND_MODE(bond) == BOND_MODE_8023AD) {
optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
bond->params.xmit_policy);
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
optval->string, bond->params.xmit_policy);
}
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
seq_printf(seq, "Primary Slave: %s",
(bond->primary_slave) ?
bond->primary_slave->dev->name : "None");
@@ -134,7 +134,7 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "\n");
}
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
seq_puts(seq, "\n802.3ad info\n");
@@ -188,9 +188,9 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct aggregator *agg
- = SLAVE_AD_INFO(slave).port.aggregator;
+ = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
seq_printf(seq, "Aggregator ID: %d\n",
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f6babcfc26e..daed52f68ce1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -45,8 +45,7 @@
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
-/*
- * "show" function for the bond_masters attribute.
+/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
return NULL;
}
-/*
- * "store" function for the bond_masters attribute. This is what
+/* "store" function for the bond_masters attribute. This is what
* creates and deletes entire bonds.
*
* The class parameter is ignored.
*
*/
-
static ssize_t bonding_store_bonds(struct class *cls,
struct class_attribute *attr,
const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
.store = bonding_store_bonds,
};
-/*
- * Show the slaves in the current bond.
- */
+/* Generic "store" method for bonding sysfs option setting */
+static ssize_t bonding_sysfs_store_option(struct device *d,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ const struct bond_option *opt;
+ int ret;
+
+ opt = bond_opt_get_by_name(attr->attr.name);
+ if (WARN_ON(!opt))
+ return -ENOENT;
+ ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
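With the generic store in place, wiring up a sysfs knob needs only a show function; the store side is shared. A sketch for a hypothetical option "foo" (the name and params field are invented for illustration; a matching BOND_OPT_FOO entry in bond_opts[] is what bond_opt_get_by_name() would resolve):

	static ssize_t bonding_show_foo(struct device *d,
					struct device_attribute *attr, char *buf)
	{
		struct bonding *bond = to_bond(d);

		return sprintf(buf, "%d\n", bond->params.foo);	/* hypothetical field */
	}
	static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR,
			   bonding_show_foo, bonding_sysfs_store_option);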
+/* Show the slaves in the current bond. */
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -190,62 +204,24 @@ static ssize_t bonding_show_slaves(struct device *d,
return res;
}
-
-/*
- * Set the slaves in the current bond.
- * This is supposed to be only thin wrapper for bond_enslave and bond_release.
- * All hard work should be done there.
- */
-static ssize_t bonding_store_slaves(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
- bonding_store_slaves);
+ bonding_sysfs_store_option);
-/*
- * Show and set the bonding mode. The bond interface must be down to
- * change the mode.
- */
+/* Show the bonding mode. */
static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
- val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+ val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
-}
-
-static ssize_t bonding_store_mode(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- bonding_show_mode, bonding_store_mode);
+ bonding_show_mode, bonding_sysfs_store_option);
-/*
- * Show and set the bonding transmit hash method.
- */
+/* Show the bonding transmit hash method. */
static ssize_t bonding_show_xmit_hash(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
-
-static ssize_t bonding_store_xmit_hash(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
- bonding_show_xmit_hash, bonding_store_xmit_hash);
+ bonding_show_xmit_hash, bonding_sysfs_store_option);
-/*
- * Show and set arp_validate.
- */
+/* Show arp_validate. */
static ssize_t bonding_show_arp_validate(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
-
-static ssize_t bonding_store_arp_validate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
- bonding_store_arp_validate);
-/*
- * Show and set arp_all_targets.
- */
+ bonding_sysfs_store_option);
+
+/* Show arp_all_targets. */
static ssize_t bonding_show_arp_all_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
-
-static ssize_t bonding_store_arp_all_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
- bonding_show_arp_all_targets, bonding_store_arp_all_targets);
+ bonding_show_arp_all_targets, bonding_sysfs_store_option);
-/*
- * Show and store fail_over_mac. User only allowed to change the
- * value when there are no slaves.
- */
+/* Show fail_over_mac. */
static ssize_t bonding_show_fail_over_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
-
-static ssize_t bonding_store_fail_over_mac(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
- bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+ bonding_show_fail_over_mac, bonding_sysfs_store_option);
-/*
- * Show and set the arp timer interval. There are two tricky bits
- * here. First, if ARP monitoring is activated, then we must disable
- * MII monitoring. Second, if the ARP timer isn't running, we must
- * start it.
- */
+/* Show the arp timer interval. */
static ssize_t bonding_show_arp_interval(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
return sprintf(buf, "%d\n", bond->params.arp_interval);
}
-
-static ssize_t bonding_store_arp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
- bonding_show_arp_interval, bonding_store_arp_interval);
+ bonding_show_arp_interval, bonding_sysfs_store_option);
-/*
- * Show and set the arp targets.
- */
+/* Show the arp targets. */
static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
return res;
}
+static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+ bonding_show_arp_targets, bonding_sysfs_store_option);
-static ssize_t bonding_store_arp_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
-
-/*
- * Show and set the up and down delays. These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
- */
+/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_downdelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
- bonding_show_downdelay, bonding_store_downdelay);
+ bonding_show_downdelay, bonding_sysfs_store_option);
static ssize_t bonding_show_updelay(struct device *d,
struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_updelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
- bonding_show_updelay, bonding_store_updelay);
+ bonding_show_updelay, bonding_sysfs_store_option);
-/*
- * Show and set the LACP interval. Interface must be down, and the mode
- * must be set to 802.3ad mode.
- */
+/* Show the LACP interval. */
static ssize_t bonding_show_lacp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
-
-static ssize_t bonding_store_lacp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
- bonding_show_lacp, bonding_store_lacp);
+ bonding_show_lacp, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
struct device_attribute *attr,
@@ -536,22 +364,8 @@ static ssize_t bonding_show_min_links(struct device *d,
return sprintf(buf, "%u\n", bond->params.min_links);
}
-
-static ssize_t bonding_store_min_links(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
- bonding_show_min_links, bonding_store_min_links);
+ bonding_show_min_links, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
-
-
-static ssize_t bonding_store_ad_select(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
- bonding_show_ad_select, bonding_store_ad_select);
+ bonding_show_ad_select, bonding_sysfs_store_option);
-/*
- * Show and set the number of peer notifications to send after a failover event.
- */
+/* Show and set the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
-/*
- * Show and set the MII monitor interval. There are two tricky bits
- * here. First, if MII monitoring is activated, then we must disable
- * ARP monitoring. Second, if the timer isn't running, we must
- * start it.
- */
+/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
return sprintf(buf, "%d\n", bond->params.miimon);
}
-
-static ssize_t bonding_store_miimon(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
- bonding_show_miimon, bonding_store_miimon);
+ bonding_show_miimon, bonding_sysfs_store_option);
-/*
- * Show and set the primary slave. The store function is much
- * simpler than bonding_store_slaves function because it only needs to
- * handle one interface name.
- * The bond must be a mode that supports a primary for this be
- * set.
- */
+/* Show the primary slave. */
static ssize_t bonding_show_primary(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
return count;
}
-
-static ssize_t bonding_store_primary(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
- bonding_show_primary, bonding_store_primary);
+ bonding_show_primary, bonding_sysfs_store_option);
-/*
- * Show and set the primary_reselect flag.
- */
+/* Show the primary_reselect flag. */
static ssize_t bonding_show_primary_reselect(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.primary_reselect);
}
-
-static ssize_t bonding_store_primary_reselect(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
- bonding_show_primary_reselect,
- bonding_store_primary_reselect);
+ bonding_show_primary_reselect, bonding_sysfs_store_option);
-/*
- * Show and set the use_carrier flag.
- */
+/* Show the use_carrier flag. */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
return sprintf(buf, "%d\n", bond->params.use_carrier);
}
-
-static ssize_t bonding_store_carrier(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
- bonding_show_carrier, bonding_store_carrier);
+ bonding_show_carrier, bonding_sysfs_store_option);
-/*
- * Show and set currently active_slave.
- */
+/* Show currently active_slave. */
static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
return count;
}
-
-static ssize_t bonding_store_active_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
- bonding_show_active_slave, bonding_store_active_slave);
-
+ bonding_show_active_slave, bonding_sysfs_store_option);
-/*
- * Show link status of the bond interface.
- */
+/* Show link status of the bond interface. */
static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
-/*
- * Show current 802.3ad aggregator ID.
- */
+/* Show current 802.3ad aggregator ID. */
static ssize_t bonding_show_ad_aggregator(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -802,7 +505,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
-/*
- * Show number of active 802.3ad ports.
- */
+/* Show number of active 802.3ad ports. */
static ssize_t bonding_show_ad_num_ports(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -824,7 +525,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
-/*
- * Show current 802.3ad actor key.
- */
+/* Show current 802.3ad actor key. */
static ssize_t bonding_show_ad_actor_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -846,7 +545,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
-/*
- * Show current 802.3ad partner key.
- */
+/* Show current 802.3ad partner key. */
static ssize_t bonding_show_ad_partner_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -868,7 +565,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
-/*
- * Show current 802.3ad partner mac.
- */
+/* Show current 802.3ad partner mac. */
static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -890,7 +585,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
}
static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
-/*
- * Show the queue_ids of the slaves in the current bond.
- */
+/* Show the queue_ids of the slaves in the current bond. */
static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
return res;
}
-
-/*
- * Set the queue_ids of the slaves in the current bond. The bond
- * interface must be enslaved for this to work.
- */
-static ssize_t bonding_store_queue_id(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
- bonding_store_queue_id);
+ bonding_sysfs_store_option);
-/*
- * Show and set the all_slaves_active flag.
- */
+/* Show the all_slaves_active flag. */
static ssize_t bonding_show_slaves_active(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
return sprintf(buf, "%d\n", bond->params.all_slaves_active);
}
-
-static ssize_t bonding_store_slaves_active(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
- bonding_show_slaves_active, bonding_store_slaves_active);
+ bonding_show_slaves_active, bonding_sysfs_store_option);
-/*
- * Show and set the number of IGMP membership reports to send on link failure
- */
+/* Show the number of IGMP membership reports to send on link failure */
static ssize_t bonding_show_resend_igmp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
return sprintf(buf, "%d\n", bond->params.resend_igmp);
}
-
-static ssize_t bonding_store_resend_igmp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
- bonding_show_resend_igmp, bonding_store_resend_igmp);
+ bonding_show_resend_igmp, bonding_sysfs_store_option);
static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+
return sprintf(buf, "%d\n", bond->params.lp_interval);
}
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_lp_interval, bonding_sysfs_store_option);
-static ssize_t bonding_store_lp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
-
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
- bonding_show_lp_interval, bonding_store_lp_interval);
+static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+ bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
static ssize_t bonding_show_packets_per_slave(struct device *d,
struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
-}
-
-static ssize_t bonding_store_packets_per_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%u\n", packets_per_slave);
}
-
static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
- bonding_show_packets_per_slave,
- bonding_store_packets_per_slave);
+ bonding_show_packets_per_slave, bonding_sysfs_store_option);
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
+ &dev_attr_tlb_dynamic_lb.attr,
NULL,
};
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
.attrs = per_bond_attrs,
};
-/*
- * Initialize sysfs. This sets up the bonding_masters file in
+/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
bn->net);
- /*
- * Permit multiple loads of the module by ignoring failures to
+ /* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
* created by second or subsequent loads of the module will
* not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
}
-/*
- * Remove /sys/class/net/bonding_masters.
- */
+/* Remove /sys/class/net/bonding_masters. */
void bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
-/*
- * Initialize sysfs for each bond. This sets up and registers
+/* Initialize sysfs for each bond. This sets up and registers
 * the 'bonding' directory for each individual bond under /sys/class/net.
*/
void bond_prepare_sysfs_group(struct bonding *bond)
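
All of the per-option store handlers deleted above collapse into the single bonding_sysfs_store_option() named in the DEVICE_ATTR() lines; its body lies outside this hunk. A minimal sketch of the shape such a consolidated handler takes, assuming a bond_opt_get_by_name() helper that maps the sysfs attribute name to its option id (an assumption, not shown in this diff):

	static ssize_t bonding_sysfs_store_option(struct device *d,
						  struct device_attribute *attr,
						  const char *buffer, size_t count)
	{
		struct bonding *bond = to_bond(d);
		const struct bond_option *opt;	/* assumed lookup result */
		int ret;

		opt = bond_opt_get_by_name(attr->attr.name);
		if (WARN_ON(!opt))
			return -ENOENT;
		/* same rtnl-protected setter the removed handlers called */
		ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
		if (!ret)
			ret = count;
		return ret;
	}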
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2e4eec5450c8..198677f58ce0 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -69,8 +69,8 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
{
const struct aggregator *agg;
- if (slave->bond->params.mode == BOND_MODE_8023AD) {
- agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
return sprintf(buf, "%d\n",
agg->aggregator_identifier);
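
The switch from '.' to '->' follows from struct slave now carrying a pointer instead of an embedded ad_slave_info (see the bonding.h hunk below). The accessor macro itself can stay a plain field access; a sketch, with the allocation site assumed to be enslave time:

	#define SLAVE_AD_INFO(slave)	((slave)->ad_info)

	/* hypothetical allocation during enslave */
	slave->ad_info = kzalloc(sizeof(struct ad_slave_info), GFP_KERNEL);
	if (!slave->ad_info)
		return -ENOMEM;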
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 00bea320e3b5..0b4d9cde0b05 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -41,42 +41,6 @@
#define BOND_DEFAULT_MIIMON 100
-#define IS_UP(dev) \
- ((((dev)->flags & IFF_UP) == IFF_UP) && \
- netif_running(dev) && \
- netif_carrier_ok(dev))
-
-/*
- * Checks whether slave is ready for transmit.
- */
-#define SLAVE_IS_OK(slave) \
- (((slave)->dev->flags & IFF_UP) && \
- netif_running((slave)->dev) && \
- ((slave)->link == BOND_LINK_UP) && \
- bond_is_active_slave(slave))
-
-
-#define USES_PRIMARY(mode) \
- (((mode) == BOND_MODE_ACTIVEBACKUP) || \
- ((mode) == BOND_MODE_TLB) || \
- ((mode) == BOND_MODE_ALB))
-
-#define BOND_NO_USES_ARP(mode) \
- (((mode) == BOND_MODE_8023AD) || \
- ((mode) == BOND_MODE_TLB) || \
- ((mode) == BOND_MODE_ALB))
-
-#define TX_QUEUE_OVERRIDE(mode) \
- (((mode) == BOND_MODE_ACTIVEBACKUP) || \
- ((mode) == BOND_MODE_ROUNDROBIN))
-
-#define BOND_MODE_IS_LB(mode) \
- (((mode) == BOND_MODE_TLB) || \
- ((mode) == BOND_MODE_ALB))
-
-#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \
- ((htonl(INADDR_BROADCAST) == a) || \
- ipv4_is_zeronet(a))
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
@@ -90,6 +54,8 @@
set_fs(fs); \
res; })
+#define BOND_MODE(bond) ((bond)->params.mode)
+
/* slave list primitives */
#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
@@ -175,6 +141,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
+ int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
};
@@ -183,8 +150,6 @@ struct bond_parm_tbl {
int mode;
};
-#define BOND_MAX_MODENAME_LEN 20
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -205,7 +170,7 @@ struct slave {
u32 speed;
u16 queue_id;
u8 perm_hwaddr[ETH_ALEN];
- struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
+ struct ad_slave_info *ad_info;
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
@@ -285,14 +250,41 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
- if (!slave || !slave->bond)
- return NULL;
return slave->bond;
}
+static inline bool bond_should_override_tx_queue(struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+ BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
+}
+
static inline bool bond_is_lb(const struct bonding *bond)
{
- return BOND_MODE_IS_LB(bond->params.mode);
+ return BOND_MODE(bond) == BOND_MODE_TLB ||
+ BOND_MODE(bond) == BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_arp(int mode)
+{
+ return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
+ mode != BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_primary(int mode)
+{
+ return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
+ mode == BOND_MODE_ALB;
+}
+
+static inline bool bond_uses_primary(struct bonding *bond)
+{
+ return bond_mode_uses_primary(BOND_MODE(bond));
+}
+
+static inline bool bond_slave_is_up(struct slave *slave)
+{
+ return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
}
static inline void bond_set_active_slave(struct slave *slave)
@@ -365,6 +357,12 @@ static inline bool bond_is_active_slave(struct slave *slave)
return !bond_slave_state(slave);
}
+static inline bool bond_slave_can_tx(struct slave *slave)
+{
+ return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
+ bond_is_active_slave(slave);
+}
+
#define BOND_PRI_RESELECT_ALWAYS 0
#define BOND_PRI_RESELECT_BETTER 1
#define BOND_PRI_RESELECT_FAILURE 2
@@ -396,12 +394,16 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
-static inline int slave_do_arp_validate_only(struct bonding *bond,
- struct slave *slave)
+static inline int slave_do_arp_validate_only(struct bonding *bond)
{
return bond->params.arp_validate & BOND_ARP_FILTER;
}
+static inline int bond_is_ip_target_ok(__be32 addr)
+{
+ return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
+}
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -479,16 +481,14 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
return addr;
}
-static inline bool slave_can_tx(struct slave *slave)
-{
- if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
- bond_is_active_slave(slave))
- return true;
- else
- return false;
-}
-
-struct bond_net;
+struct bond_net {
+ struct net *net; /* Associated network namespace */
+ struct list_head dev_list;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_dir;
+#endif
+ struct class_attribute class_attr_bonding_masters;
+};
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -500,7 +500,7 @@ int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -516,15 +516,9 @@ void bond_netlink_fini(void);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
-
-struct bond_net {
- struct net * net; /* Associated network namespace */
- struct list_head dev_list;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry * proc_dir;
-#endif
- struct class_attribute class_attr_bonding_masters;
-};
+bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags);
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
@@ -576,6 +570,27 @@ static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
return NULL;
}
+/* Caller must hold rcu_read_lock() for read */
+static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+ struct netdev_hw_addr *ha;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+
+ if (netdev_uc_empty(bond->dev))
+ return false;
+
+ netdev_for_each_uc_addr(ha, bond->dev)
+ if (ether_addr_equal_64bits(mac, ha->addr))
+ return true;
+
+ return false;
+}
+
/* Check if the ip is present in arp ip list, or first free slot if ip == 0
* Returns -1 if not found, index if found
*/
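
Taken together, the header rework trades flag-testing macros for typed inline helpers: arguments are type-checked, evaluated exactly once, and every mode test funnels through the new BOND_MODE() accessor. A before/after sketch of a hypothetical transmit-path call site:

	/* before: macro, no type checking, 'slave' expanded several times */
	if (SLAVE_IS_OK(slave))
		bond_dev_queue_xmit(bond, skb, slave->dev);

	/* after: inline helper with identical semantics */
	if (bond_slave_can_tx(slave))
		bond_dev_queue_xmit(bond, skb, slave->dev);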
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95dae2c7..41688229c570 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on ARM
+ depends on ARCH_AT91 || COMPILE_TEST
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
@@ -77,12 +77,6 @@ config CAN_TI_HECC
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
-config CAN_MCP251X
- tristate "Microchip MCP251x SPI CAN controllers"
- depends on SPI && HAS_DMA
- ---help---
- Driver for the Microchip MCP251x SPI CAN controllers.
-
config CAN_BFIN
depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
@@ -110,7 +104,7 @@ config CAN_FLEXCAN
config PCH_CAN
tristate "Intel EG20T PCH CAN controller"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
---help---
This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -125,6 +119,24 @@ config CAN_GRCAN
endian syntheses of the cores would need some modifications on
the hardware level to work.
+config CAN_RCAR
+ tristate "Renesas R-Car CAN controller"
+ depends on ARM
+ ---help---
+ Say Y here if you want to use CAN controller found on Renesas R-Car
+ SoCs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_can.
+
+config CAN_XILINXCAN
+ tristate "Xilinx CAN"
+ depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on COMMON_CLK && HAS_IOMEM
+ ---help---
+ Xilinx CAN driver. This driver supports both soft AXI CAN IP and
+ Zynq CANPS IP.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
@@ -133,6 +145,8 @@ source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/spi/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..1697f22353a9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
+obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -19,11 +20,12 @@ obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
-obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
obj-$(CONFIG_CAN_GRCAN) += grcan.o
+obj-$(CONFIG_CAN_RCAR) += rcar_can.o
+obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 95e04e2002da..8e78bb48f5a4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -252,8 +252,7 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj
struct c_can_priv *priv = netdev_priv(dev);
int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
- priv->write_reg(priv, reg + 1, cmd);
- priv->write_reg(priv, reg, obj);
+ priv->write_reg32(priv, reg, (cmd << 16) | obj);
for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
@@ -328,8 +327,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
change_bit(idx, &priv->tx_dir);
}
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
@@ -391,8 +389,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
- arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
+ arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
if (arb & IF_ARB_MSGXTD)
frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -424,12 +421,10 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
struct c_can_priv *priv = netdev_priv(dev);
mask |= BIT(29);
- priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
- priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
id |= IF_ARB_MSGVAL;
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
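
Each pair of 16-bit accesses above becomes a single read_reg32()/write_reg32() call. Ordering is the subtle part for c_can_obj_update(): packing (cmd << 16) | obj puts the command bits in the upper half, and on cores where the low-half write (the message number) starts the IF transfer, the command must already be in place. A sketch of a composed accessor for 16-bit-only buses, high half first (a hedged reading of the hardware; the concrete implementations appear in the PCI and platform files below):

	static void write_reg32_composed(const struct c_can_priv *priv,
					 enum reg index, u32 val)
	{
		priv->write_reg(priv, index + 1, val >> 16);	/* command half */
		priv->write_reg(priv, index, val);		/* may trigger */
	}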
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index c56f1b1c11ca..99ad1aa576b0 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -78,6 +78,7 @@ enum reg {
C_CAN_INTPND2_REG,
C_CAN_MSGVAL1_REG,
C_CAN_MSGVAL2_REG,
+ C_CAN_FUNCTION_REG,
};
static const u16 reg_map_c_can[] = {
@@ -129,6 +130,7 @@ static const u16 reg_map_d_can[] = {
[C_CAN_BRPEXT_REG] = 0x0E,
[C_CAN_INT_REG] = 0x10,
[C_CAN_TEST_REG] = 0x14,
+ [C_CAN_FUNCTION_REG] = 0x18,
[C_CAN_TXRQST1_REG] = 0x88,
[C_CAN_TXRQST2_REG] = 0x8A,
[C_CAN_NEWDAT1_REG] = 0x9C,
@@ -176,8 +178,10 @@ struct c_can_priv {
atomic_t tx_active;
unsigned long tx_dir;
int last_status;
- u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
- void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+ u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+ void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
+ u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
+ void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
void __iomem *base;
const u16 *regs;
void *priv; /* for board-specific data */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index fe5f6303b584..5d11e0e4225b 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -19,9 +19,13 @@
#include "c_can.h"
+#define PCI_DEVICE_ID_PCH_CAN 0x8818
+#define PCH_PCI_SOFT_RESET 0x01fc
+
enum c_can_pci_reg_align {
C_CAN_REG_ALIGN_16,
C_CAN_REG_ALIGN_32,
+ C_CAN_REG_32,
};
struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
enum c_can_pci_reg_align reg_align;
/* Set the frequency */
unsigned int freq;
+ /* PCI bar number */
+ int bar;
+ /* Callback for reset */
+ void (*init)(const struct c_can_priv *priv, bool enable);
};
/*
@@ -39,30 +47,70 @@ struct c_can_pci_data {
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
-static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
-static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
-static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
-static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
+static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
+ enum reg index)
+{
+ return (u16)ioread32(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
+}
+
+static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ u32 val;
+
+ val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+ return val;
+}
+
+static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ priv->write_reg(priv, index + 1, val >> 16);
+ priv->write_reg(priv, index, val);
+}
+
+static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
+{
+ if (enable) {
+ u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
+
+ /* write to sw reset register */
+ iowrite32(1, addr);
+ iowrite32(0, addr);
+ }
+}
+
static int c_can_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -90,7 +138,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
}
- addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ addr = pci_iomap(pdev, c_can_pci_data->bar,
+ pci_resource_len(pdev, c_can_pci_data->bar));
if (!addr) {
dev_err(&pdev->dev,
"device has no PCI memory resources, "
@@ -147,10 +196,18 @@ static int c_can_pci_probe(struct pci_dev *pdev,
priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
break;
+ case C_CAN_REG_32:
+ priv->read_reg = c_can_pci_read_reg_32bit;
+ priv->write_reg = c_can_pci_write_reg_32bit;
+ break;
default:
ret = -EINVAL;
goto out_free_c_can;
}
+ priv->read_reg32 = c_can_pci_read_reg32;
+ priv->write_reg32 = c_can_pci_write_reg32;
+
+ priv->raminit = c_can_pci_data->init;
ret = register_c_can_dev(dev);
if (ret) {
@@ -198,6 +255,15 @@ static struct c_can_pci_data c_can_sta2x11= {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
.freq = 52000000, /* 52 MHz */
+ .bar = 0,
+};
+
+static struct c_can_pci_data c_can_pch = {
+ .type = BOSCH_C_CAN,
+ .reg_align = C_CAN_REG_32,
+ .freq = 50000000, /* 50 MHz */
+ .init = c_can_pci_reset_pch,
+ .bar = 1,
};
#define C_CAN_ID(_vend, _dev, _driverdata) { \
@@ -207,6 +273,8 @@ static struct c_can_pci_data c_can_sta2x11= {
static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
c_can_sta2x11),
+ C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
+ c_can_pch),
{},
};
static struct pci_driver c_can_pci_driver = {
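
The new .init callback is stored in priv->raminit during probe, the same hook the platform glue uses for RAM initialization. A hedged sketch of the core-side dispatch, assumed to live in c_can.h and not shown in this hunk:

	static inline void c_can_reset_ram(const struct c_can_priv *priv,
					   bool enable)
	{
		if (priv->raminit)
			priv->raminit(priv, enable);
	}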
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 1df0b322d1e4..824108cd9fd5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -40,6 +40,7 @@
#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
+#define DCAN_RAM_INIT_BIT (1 << 3)
static DEFINE_SPINLOCK(raminit_lock);
/*
* 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
-static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
-static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
-static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
-static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
-static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
u32 val)
{
/* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
udelay(1);
}
-static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
{
u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait(priv, ctrl, mask);
+ c_can_hw_raminit_wait_ti(priv, ctrl, mask);
if (enable) {
/* Set start bit and wait for the done bit. */
ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait(priv, ctrl, mask);
+ c_can_hw_raminit_wait_ti(priv, ctrl, mask);
}
spin_unlock(&raminit_lock);
}
+static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ u32 val;
+
+ val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+ return val;
+}
+
+static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ priv->write_reg(priv, index + 1, val >> 16);
+ priv->write_reg(priv, index, val);
+}
+
+static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ return readl(priv->base + priv->regs[index]);
+}
+
+static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ writel(val, priv->base + priv->regs[index]);
+}
+
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
+{
+ while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
+ udelay(1);
+}
+
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+ u32 ctrl;
+
+ ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
+ ctrl &= ~DCAN_RAM_INIT_BIT;
+ priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+ c_can_hw_raminit_wait(priv, ctrl);
+
+ if (enable) {
+ ctrl |= DCAN_RAM_INIT_BIT;
+ priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+ c_can_hw_raminit_wait(priv, ctrl);
+ }
+}
+
static struct platform_device_id c_can_id_table[] = {
[BOSCH_C_CAN_PLATFORM] = {
.name = KBUILD_MODNAME,
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
case IORESOURCE_MEM_32BIT:
priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ priv->read_reg32 = c_can_plat_read_reg32;
+ priv->write_reg32 = c_can_plat_write_reg32;
break;
case IORESOURCE_MEM_16BIT:
default:
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ priv->read_reg32 = c_can_plat_read_reg32;
+ priv->write_reg32 = c_can_plat_write_reg32;
break;
}
break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ priv->read_reg32 = d_can_plat_read_reg32;
+ priv->write_reg32 = d_can_plat_write_reg32;
if (pdev->dev.of_node)
priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->instance = pdev->id;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /* Not all D_CAN modules have a separate register for the D_CAN
+ * RAM initialization. Use default RAM init bit in D_CAN module
+ * if not specified in DT.
+ */
+ if (!res) {
+ priv->raminit = c_can_hw_raminit;
+ break;
+ }
+
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
- priv->raminit = c_can_hw_raminit;
+ priv->raminit = c_can_hw_raminit_ti;
break;
default:
ret = -EINVAL;
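
The raminit flavor thus falls out of the described resources: a second memory region (the TI control module) selects the TI-specific sequence, while its absence falls back to the generic RAM-init bit in C_CAN_FUNCTION_REG. The decision, reduced to a sketch:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)					/* no control module */
		priv->raminit = c_can_hw_raminit;	/* FUNCTION_REG bit */
	else if (!IS_ERR(devm_ioremap_resource(&pdev->dev, res)))
		priv->raminit = c_can_hw_raminit_ti;	/* TI control region */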
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index f19be5269e7b..81c711719490 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on PPC || M68K
+ depends on PPC
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644
index 000000000000..5268d216ecfa
--- /dev/null
+++ b/drivers/net/can/rcar_can.c
@@ -0,0 +1,876 @@
+/* Renesas R-Car CAN device driver
+ *
+ * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/can/platform/rcar_can.h>
+
+#define RCAR_CAN_DRV_NAME "rcar_can"
+
+/* Mailbox configuration:
+ * mailbox 60 - 63 - Rx FIFO mailboxes
+ * mailbox 56 - 59 - Tx FIFO mailboxes
+ * non-FIFO mailboxes are not used
+ */
+#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */
+#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */
+#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */
+#define RCAR_CAN_FIFO_DEPTH 4
+
+/* Mailbox registers structure */
+struct rcar_can_mbox_regs {
+ u32 id; /* IDE and RTR bits, SID and EID */
+ u8 stub; /* Not used */
+ u8 dlc; /* Data Length Code - bits [0..3] */
+ u8 data[8]; /* Data Bytes */
+ u8 tsh; /* Time Stamp Higher Byte */
+ u8 tsl; /* Time Stamp Lower Byte */
+};
+
+struct rcar_can_regs {
+ struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
+ u32 mkr_2_9[8]; /* Mask Registers 2-9 */
+ u32 fidcr[2]; /* FIFO Received ID Compare Register */
+ u32 mkivlr1; /* Mask Invalid Register 1 */
+ u32 mier1; /* Mailbox Interrupt Enable Register 1 */
+ u32 mkr_0_1[2]; /* Mask Registers 0-1 */
+ u32 mkivlr0; /* Mask Invalid Register 0 */
+ u32 mier0; /* Mailbox Interrupt Enable Register 0 */
+ u8 pad_440[0x3c0];
+ u8 mctl[64]; /* Message Control Registers */
+ u16 ctlr; /* Control Register */
+ u16 str; /* Status register */
+ u8 bcr[3]; /* Bit Configuration Register */
+ u8 clkr; /* Clock Select Register */
+ u8 rfcr; /* Receive FIFO Control Register */
+ u8 rfpcr; /* Receive FIFO Pointer Control Register */
+ u8 tfcr; /* Transmit FIFO Control Register */
+ u8 tfpcr; /* Transmit FIFO Pointer Control Register */
+ u8 eier; /* Error Interrupt Enable Register */
+ u8 eifr; /* Error Interrupt Factor Judge Register */
+ u8 recr; /* Receive Error Count Register */
+ u8 tecr; /* Transmit Error Count Register */
+ u8 ecsr; /* Error Code Store Register */
+ u8 cssr; /* Channel Search Support Register */
+ u8 mssr; /* Mailbox Search Status Register */
+ u8 msmr; /* Mailbox Search Mode Register */
+ u16 tsr; /* Time Stamp Register */
+ u8 afsr; /* Acceptance Filter Support Register */
+ u8 pad_857;
+ u8 tcr; /* Test Control Register */
+ u8 pad_859[7];
+ u8 ier; /* Interrupt Enable Register */
+ u8 isr; /* Interrupt Status Register */
+ u8 pad_862;
+ u8 mbsmr; /* Mailbox Search Mask Register */
+};
+
+struct rcar_can_priv {
+ struct can_priv can; /* Must be the first member! */
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct rcar_can_regs __iomem *regs;
+ struct clk *clk;
+ u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
+ u32 tx_head;
+ u32 tx_tail;
+ u8 clock_select;
+ u8 ier;
+};
+
+static const struct can_bittiming_const rcar_can_bittiming_const = {
+ .name = RCAR_CAN_DRV_NAME,
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Control Register bits */
+#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM (1 << 10)
+#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
+#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
+#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+
+/* Status Register bits */
+#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+
+/* FIFO Received ID Compare Registers 0 and 1 bits */
+#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+
+/* Receive FIFO Control Register bits */
+#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+
+/* Transmit FIFO Control Register bits */
+#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
+ /* Message Number Status Bits */
+#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_N_RX_MKREGS2 8
+
+/* Bit Configuration Register settings */
+#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
+#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
+#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
+#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+
+/* Mailbox and Mask Registers bits */
+#define RCAR_CAN_IDE (1 << 31)
+#define RCAR_CAN_RTR (1 << 30)
+#define RCAR_CAN_SID_SHIFT 18
+
+/* Mailbox Interrupt Enable Register 1 bits */
+#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+
+/* Interrupt Enable Register bits */
+#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
+/* Interrupt Status Register bits */
+#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
+
+/* Error Interrupt Enable Register bits */
+#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+
+/* Error Interrupt Factor Judge Register bits */
+#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+
+/* Error Code Store Register bits */
+#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+
+#define RCAR_CAN_NAPI_WEIGHT 4
+#define MAX_STR_READS 0x100
+
+static void tx_failure_cleanup(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
+ can_free_echo_skb(ndev, i);
+}
+
+static void rcar_can_error(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 eifr, txerr = 0, rxerr = 0;
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ eifr = readb(&priv->regs->eifr);
+ if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
+ txerr = readb(&priv->regs->tecr);
+ rxerr = readb(&priv->regs->recr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_BEIF) {
+ int rx_errors = 0, tx_errors = 0;
+ u8 ecsr;
+
+ netdev_dbg(priv->ndev, "Bus error interrupt:\n");
+ if (skb) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ }
+ ecsr = readb(&priv->regs->ecsr);
+ if (ecsr & RCAR_CAN_ECSR_ADEF) {
+ netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE0F) {
+ netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE1F) {
+ netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
+ if (ecsr & RCAR_CAN_ECSR_CEF) {
+ netdev_dbg(priv->ndev, "CRC Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (ecsr & RCAR_CAN_ECSR_AEF) {
+ netdev_dbg(priv->ndev, "ACK Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ }
+ if (ecsr & RCAR_CAN_ECSR_FEF) {
+ netdev_dbg(priv->ndev, "Form Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+ if (ecsr & RCAR_CAN_ECSR_SEF) {
+ netdev_dbg(priv->ndev, "Stuff Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ priv->can.can_stats.bus_error++;
+ ndev->stats.rx_errors += rx_errors;
+ ndev->stats.tx_errors += tx_errors;
+ writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ }
+ if (eifr & RCAR_CAN_EIFR_EWIF) {
+ netdev_dbg(priv->ndev, "Error warning interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ }
+ if (eifr & RCAR_CAN_EIFR_EPIF) {
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ if (eifr & RCAR_CAN_EIFR_BOEIF) {
+ netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
+ tx_failure_cleanup(ndev);
+ priv->ier = RCAR_CAN_IER_ERSIE;
+ writeb(priv->ier, &priv->regs->ier);
+ priv->can.state = CAN_STATE_BUS_OFF;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ }
+ if (eifr & RCAR_CAN_EIFR_ORIF) {
+ netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_OLIF) {
+ netdev_dbg(priv->ndev,
+ "Overload Frame Transmission error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+static void rcar_can_tx_done(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u8 isr;
+
+ while (1) {
+ u8 unsent = readb(&priv->regs->tfcr);
+
+ unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
+ RCAR_CAN_TFCR_TFUST_SHIFT;
+ if (priv->tx_head - priv->tx_tail <= unsent)
+ break;
+ stats->tx_packets++;
+ stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
+ RCAR_CAN_FIFO_DEPTH];
+ priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_tail++;
+ netif_wake_queue(ndev);
+ }
+ /* Clear interrupt */
+ isr = readb(&priv->regs->isr);
+ writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
+static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u8 isr;
+
+ isr = readb(&priv->regs->isr);
+ if (!(isr & priv->ier))
+ return IRQ_NONE;
+
+ if (isr & RCAR_CAN_ISR_ERSF)
+ rcar_can_error(ndev);
+
+ if (isr & RCAR_CAN_ISR_TXFF)
+ rcar_can_tx_done(ndev);
+
+ if (isr & RCAR_CAN_ISR_RXFF) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ priv->ier &= ~RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_can_set_bittiming(struct net_device *dev)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 bcr;
+
+ bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
+ RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
+ RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
+ * All the registers are big-endian but they get byte-swapped on 32-bit
+ * read/write (but not on 8-bit, contrary to the manuals)...
+ */
+ writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
+}
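+
+/* Worked example (generic CAN bit-timing arithmetic, not R-Car specifics):
+ * with a 16 MHz CAN clock, brp = 2, prop_seg + phase_seg1 = 13 and
+ * phase_seg2 = 2, one bit time is (1 + 13 + 2) * (2 / 16 MHz) = 2 us,
+ * i.e. 500 kbit/s; note that each BCR field above stores its value
+ * minus one.
+ */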
+
+static void rcar_can_start(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Set controller to known mode:
+ * - FIFO mailbox mode
+ * - accept all messages
+ * - overrun mode
+ * CAN is in sleep mode after MCU hardware or software reset.
+ */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ /* Go to reset mode */
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ rcar_can_set_bittiming(ndev);
+ ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
+ ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
+ /* at bus-off */
+ ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
+ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
+ writew(ctlr, &priv->regs->ctlr);
+
+ /* Accept all SID and EID */
+ writel(0, &priv->regs->mkr_2_9[6]);
+ writel(0, &priv->regs->mkr_2_9[7]);
+ /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
+ writel(0, &priv->regs->mkivlr1);
+ /* Accept all frames */
+ writel(0, &priv->regs->fidcr[0]);
+ writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
+ /* Enable and configure FIFO mailbox interrupts */
+ writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
+
+ priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
+ RCAR_CAN_IER_TXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+
+ /* Accumulate error codes */
+ writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
+ /* Enable error interrupts */
+ writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
+ (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
+ RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
+ RCAR_CAN_EIER_OLIE, &priv->regs->eier);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Go to operation mode */
+ writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
+ break;
+ }
+ /* Enable Rx and Tx FIFO */
+ writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
+ writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
+}
+
+static int rcar_can_open(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+ err);
+ goto out;
+ }
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto out_clock;
+ }
+ napi_enable(&priv->napi);
+ err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+ goto out_close;
+ }
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ rcar_can_start(ndev);
+ netif_start_queue(ndev);
+ return 0;
+out_close:
+ napi_disable(&priv->napi);
+ close_candev(ndev);
+out_clock:
+ clk_disable_unprepare(priv->clk);
+out:
+ return err;
+}
+
+static void rcar_can_stop(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Go to (force) reset mode */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ writel(0, &priv->regs->mier0);
+ writel(0, &priv->regs->mier1);
+ writeb(0, &priv->regs->ier);
+ writeb(0, &priv->regs->eier);
+ /* Go to sleep mode */
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_can_close(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ rcar_can_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ napi_disable(&priv->napi);
+ clk_disable_unprepare(priv->clk);
+ close_candev(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ return 0;
+}
+
+static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 data, i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
+ data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ else /* Standard frame format */
+ data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+
+ if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+ data |= RCAR_CAN_RTR;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++)
+ writeb(cf->data[i],
+ &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
+ }
+
+ writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
+
+ writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+
+ priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_head++;
+ /* Start Tx: write 0xff to the TFPCR register to increment
+ * the CPU-side pointer for the transmit FIFO to the next
+ * mailbox location
+ */
+ writeb(0xff, &priv->regs->tfpcr);
+ /* Stop the queue if we've filled all FIFO entries */
+ if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rcar_can_netdev_ops = {
+ .ndo_open = rcar_can_open,
+ .ndo_stop = rcar_can_close,
+ .ndo_start_xmit = rcar_can_start_xmit,
+};
+
+static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 data;
+ u8 dlc;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
+ if (data & RCAR_CAN_IDE)
+ cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+
+ dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
+ cf->can_dlc = get_can_dlc(dlc);
+ if (data & RCAR_CAN_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (dlc = 0; dlc < cf->can_dlc; dlc++)
+ cf->data[dlc] =
+ readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+ }
+
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+}
+
+static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct rcar_can_priv *priv = container_of(napi,
+ struct rcar_can_priv, napi);
+ int num_pkts;
+
+ for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+ u8 rfcr, isr;
+
+ isr = readb(&priv->regs->isr);
+ /* Clear interrupt bit */
+ if (isr & RCAR_CAN_ISR_RXFF)
+ writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
+ rfcr = readb(&priv->regs->rfcr);
+ if (rfcr & RCAR_CAN_RFCR_RFEST)
+ break;
+ rcar_can_rx_pkt(priv);
+ /* Write 0xff to the RFPCR register to increment
+ * the CPU-side pointer for the receive FIFO
+ * to the next mailbox location
+ */
+ writeb(0xff, &priv->regs->rfpcr);
+ }
+ /* All packets processed */
+ if (num_pkts < quota) {
+ napi_complete(napi);
+ priv->ier |= RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ }
+ return num_pkts;
+}
+
+static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ rcar_can_start(ndev);
+ netif_wake_queue(ndev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rcar_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+ bec->txerr = readb(&priv->regs->tecr);
+ bec->rxerr = readb(&priv->regs->recr);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int rcar_can_probe(struct platform_device *pdev)
+{
+ struct rcar_can_platform_data *pdata;
+ struct rcar_can_priv *priv;
+ struct net_device *ndev;
+ struct resource *mem;
+ void __iomem *addr;
+ int err = -ENODEV;
+ int irq;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ goto fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ goto fail;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto fail;
+ }
+
+ ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev() failed\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv = netdev_priv(ndev);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "cannot get clock: %d\n", err);
+ goto fail_clk;
+ }
+
+ ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->clock_select = pdata->clock_select;
+ priv->can.clock.freq = clk_get_rate(priv->clk);
+ priv->can.bittiming_const = &rcar_can_bittiming_const;
+ priv->can.do_set_mode = rcar_can_do_set_mode;
+ priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "register_candev() failed, error %d\n",
+ err);
+ goto fail_candev;
+ }
+
+ devm_can_led_init(ndev);
+
+ dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+ priv->regs, ndev->irq);
+
+ return 0;
+fail_candev:
+ netif_napi_del(&priv->napi);
+fail_clk:
+ free_candev(ndev);
+fail:
+ return err;
+}
+
+static int rcar_can_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int err;
+
+ err = clk_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ return err;
+ }
+
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+
+static struct platform_driver rcar_can_driver = {
+ .driver = {
+ .name = RCAR_CAN_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &rcar_can_pm_ops,
+ },
+ .probe = rcar_can_probe,
+ .remove = rcar_can_remove,
+};
+
+module_platform_driver(rcar_can_driver);
+
+MODULE_AUTHOR("Cogent Embedded, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index dcf9196f6316..ea4d4f1a6411 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -52,6 +52,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/skb.h>
@@ -85,6 +86,7 @@ struct slcan {
struct tty_struct *tty; /* ptr to TTY structure */
struct net_device *dev; /* easy for intr handling */
spinlock_t lock;
+ struct work_struct tx_work; /* Flushes transmit buffer */
/* These are pointers to the malloc()ed frame buffers. */
unsigned char rbuff[SLC_MTU]; /* receiver buffer */
@@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
sl->dev->stats.tx_bytes += cf->can_dlc;
}
-/*
- * Called by the driver when there's room for more data. If we have
- * more packets to send, we send them here.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
{
+ struct slcan *sl = container_of(work, struct slcan, tx_work);
int actual;
- struct slcan *sl = (struct slcan *) tty->disc_data;
+ spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+ if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
+ spin_unlock_bh(&sl->lock);
return;
+ }
- spin_lock_bh(&sl->lock);
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
spin_unlock_bh(&sl->lock);
netif_wake_queue(sl->dev);
return;
}
- actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
spin_unlock_bh(&sl->lock);
}
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ struct slcan *sl = tty->disc_data;
+
+ schedule_work(&sl->tx_work);
+}
+
/* Send a can_frame to a TTY queue. */
static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
sl->magic = SLCAN_MAGIC;
sl->dev = dev;
spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slcan_transmit);
slcan_devs[i] = dev;
return sl;
@@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
return;
+ spin_lock_bh(&sl->lock);
tty->disc_data = NULL;
sl->tty = NULL;
+ spin_unlock_bh(&sl->lock);
+
+ flush_work(&sl->tx_work);
/* Flush network side */
unregister_netdev(sl->dev);
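
The slcan change above is the standard pattern for moving work out of a tty wakeup callback, which may run in atomic context, into process context via the system workqueue; the close path clears sl->tty under the lock and then flush_work() guarantees no transmit work is still running. A minimal sketch of the pattern, using a hypothetical struct foo (illustration only, not part of the patch):

	struct foo {
		struct tty_struct *tty;
		spinlock_t lock;
		struct work_struct tx_work;
		unsigned char buf[64];
		int len;
	};

	static void foo_transmit(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, tx_work);

		spin_lock_bh(&f->lock);
		if (f->tty)	/* revalidate under the lock */
			f->tty->ops->write(f->tty, f->buf, f->len);
		spin_unlock_bh(&f->lock);
	}

	static void foo_write_wakeup(struct tty_struct *tty)
	{
		struct foo *f = tty->disc_data;

		schedule_work(&f->tx_work);	/* defer to process context */
	}

	/* setup:    INIT_WORK(&f->tx_work, foo_transmit);
	 * teardown: clear f->tty under the lock, then flush_work(&f->tx_work)
	 */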
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7d8c8f3672dd..bacd236ce306 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -556,15 +556,6 @@ failed:
/*
* netdev sysfs
*/
-static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *ndev = to_net_dev(dev);
- struct softing_priv *priv = netdev2softing(ndev);
-
- return sprintf(buf, "%i\n", priv->index);
-}
-
static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
return count;
}
-static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
static const struct attribute *const netdev_sysfs_attrs[] = {
- &dev_attr_channel.attr,
&dev_attr_chip.attr,
&dev_attr_output.attr,
NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
{
int ret;
- netdev->sysfs_groups[0] = &netdev_sysfs_group;
ret = register_candev(netdev);
if (ret) {
dev_alert(&netdev->dev, "register failed\n");
return ret;
}
+ if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
+ netdev_alert(netdev, "sysfs group failed\n");
+
return 0;
}
static void softing_netdev_cleanup(struct net_device *netdev)
{
+ sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
unregister_candev(netdev);
free_candev(netdev);
}
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
DEV_ATTR_RO_STR(hardware, pdat->name);
DEV_ATTR_RO(hardware_version, id.hw_version);
DEV_ATTR_RO(license, id.license);
-DEV_ATTR_RO(frequency, id.freq);
-DEV_ATTR_RO(txpending, tx.pending);
static struct attribute *softing_pdev_attrs[] = {
&dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
&dev_attr_hardware.attr,
&dev_attr_hardware_version.attr,
&dev_attr_license.attr,
- &dev_attr_frequency.attr,
- &dev_attr_txpending.attr,
NULL,
};
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 000000000000..148cae5871a6
--- /dev/null
+++ b/drivers/net/can/spi/Kconfig
@@ -0,0 +1,10 @@
+menu "CAN SPI interfaces"
+ depends on SPI
+
+config CAN_MCP251X
+ tristate "Microchip MCP251x SPI CAN controllers"
+ depends on HAS_DMA
+ ---help---
+ Driver for the Microchip MCP251x SPI CAN controllers.
+
+endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 000000000000..90bcacffbc65
--- /dev/null
+++ b/drivers/net/can/spi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux Controller Area Network SPI drivers.
+#
+
+
+obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 28c11f815245..5df239e68812 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -214,6 +214,8 @@
#define TX_ECHO_SKB_MAX 1
+#define MCP251X_OST_DELAY_MS (5)
+
#define DEVICE_NAME "mcp251x"
static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
@@ -624,50 +626,45 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ u8 reg;
int ret;
- unsigned long timeout;
+
+ /* Wait for oscillator startup timer after power up */
+ mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
- ret = spi_write(spi, priv->spi_tx_buf, 1);
- if (ret) {
- dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
- return -EIO;
- }
+ ret = mcp251x_spi_trans(spi, 1);
+ if (ret)
+ return ret;
+
+ /* Wait for oscillator startup timer after reset */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ reg = mcp251x_read_reg(spi, CANSTAT);
+ if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+ return -ENODEV;
- /* Wait for reset to finish */
- timeout = jiffies + HZ;
- mdelay(10);
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
- != CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't"
- " enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
{
- int st1, st2;
+ u8 ctrl;
+ int ret;
- mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_reset(spi);
+ if (ret)
+ return ret;
- /*
- * Please note that these are "magic values" based on after
- * reset defaults taken from data sheet which allows us to see
- * if we really have a chip on the bus (we avoid common all
- * zeroes or all ones situations)
- */
- st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
- st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
+ ctrl = mcp251x_read_reg(spi, CANCTRL);
+
+ dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
- dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
+ /* Check for power up default value */
+ if ((ctrl & 0x17) != 0x07)
+ return -ENODEV;
- /* Check for power up default values */
- return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
+ return 0;
}
static int mcp251x_power_enable(struct regulator *reg, int enable)
@@ -776,7 +773,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mdelay(10);
mcp251x_hw_reset(spi);
mcp251x_setup(net, priv, spi);
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
@@ -955,7 +951,7 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags, DEVICE_NAME, priv);
+ flags | IRQF_ONESHOT, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
mcp251x_power_enable(priv->transceiver, 0);
@@ -1032,8 +1028,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- int freq, ret = -ENODEV;
struct clk *clk;
+ int freq, ret;
clk = devm_clk_get(&spi->dev, NULL);
if (IS_ERR(clk)) {
@@ -1076,6 +1072,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->net = net;
priv->clk = clk;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
@@ -1088,8 +1096,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- spi_set_drvdata(spi, priv);
-
priv->spi = spi;
mutex_init(&priv->mcp_lock);
@@ -1134,20 +1140,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- /* Configure the SPI bus */
- spi->mode = spi->mode ? : SPI_MODE_0;
- if (mcp251x_is_2510(spi))
- spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
- else
- spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
- spi->bits_per_word = 8;
- spi_setup(spi);
-
/* Here is OK to not lock the MCP, no one knows about it yet */
- if (!mcp251x_hw_probe(spi)) {
- ret = -ENODEV;
+ ret = mcp251x_hw_probe(spi);
+ if (ret)
goto error_probe;
- }
+
mcp251x_hw_sleep(spi);
ret = register_candev(net);
@@ -1156,7 +1153,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
- return ret;
+ return 0;
error_probe:
if (mcp251x_enable_dma)
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index fc96a3d83ebe..a77db919363c 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,13 +13,21 @@ config CAN_ESD_USB2
This driver supports the CAN-USB/2 interface
from esd electronic system design gmbh (http://www.esd.eu).
+config CAN_GS_USB
+ tristate "Geschwister Schneider UG interfaces"
+ ---help---
+ This driver supports the Geschwister Schneider USB/CAN devices.
+ If unsure, choose N;
+ choose Y for built-in support,
+ or M to compile as a module (the module will be named gs_usb).
+
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light.
- The driver gives support for the following devices:
+ The driver provides support for the following devices:
- Kvaser Leaf Light
- Kvaser Leaf Professional HS
- Kvaser Leaf SemiPro HS
@@ -36,6 +44,8 @@ config CAN_KVASER_USB
- Kvaser Leaf Light "China"
- Kvaser BlackBird SemiPro
- Kvaser USBcan R
+ - Kvaser Leaf Light v2
+ - Kvaser Mini PCI Express HS
If unsure, say N.
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index becef460a91a..7b9a393b1ac8 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644
index 000000000000..04b0f84612f0
--- /dev/null
+++ b/drivers/net/can/usb/gs_usb.c
@@ -0,0 +1,971 @@
+/* CAN driver for Geschwister Schneider USB/CAN devices.
+ *
+ * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ *
+ * Many thanks to all socketcan devs!
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+/* Device specific constants */
+#define USB_GSUSB_1_VENDOR_ID 0x1d50
+#define USB_GSUSB_1_PRODUCT_ID 0x606f
+
+#define GSUSB_ENDPOINT_IN 1
+#define GSUSB_ENDPOINT_OUT 2
+
+/* Device specific constants */
+enum gs_usb_breq {
+ GS_USB_BREQ_HOST_FORMAT = 0,
+ GS_USB_BREQ_BITTIMING,
+ GS_USB_BREQ_MODE,
+ GS_USB_BREQ_BERR,
+ GS_USB_BREQ_BT_CONST,
+ GS_USB_BREQ_DEVICE_CONFIG
+};
+
+enum gs_can_mode {
+ /* reset a channel. turns it off */
+ GS_CAN_MODE_RESET = 0,
+ /* starts a channel */
+ GS_CAN_MODE_START
+};
+
+enum gs_can_state {
+ GS_CAN_STATE_ERROR_ACTIVE = 0,
+ GS_CAN_STATE_ERROR_WARNING,
+ GS_CAN_STATE_ERROR_PASSIVE,
+ GS_CAN_STATE_BUS_OFF,
+ GS_CAN_STATE_STOPPED,
+ GS_CAN_STATE_SLEEPING
+};
+
+/* data types passed between host and device */
+struct gs_host_config {
+ u32 byte_order;
+} __packed;
+/* All data exchanged between host and device is exchanged in host byte order,
+ * thanks to the struct gs_host_config byte_order member, which is sent first
+ * to indicate the desired byte order.
+ */
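+
To make the handshake concrete: the probe code below sends the magic 0x0000beef in struct gs_host_config, so firmware can infer the host's endianness from the byte sequence it receives. A hedged sketch of the device-side check (hypothetical helper, not from this driver):

	/* 0x0000beef as it arrives on the wire, byte by byte */
	static int host_is_little_endian(const u8 raw[4])
	{
		if (raw[0] == 0xef && raw[1] == 0xbe && raw[2] == 0x00 && raw[3] == 0x00)
			return 1;	/* little-endian host */
		if (raw[0] == 0x00 && raw[1] == 0x00 && raw[2] == 0xbe && raw[3] == 0xef)
			return 0;	/* big-endian host */
		return -1;		/* unrecognized magic */
	}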
+
+struct gs_device_config {
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 icount;
+ u32 sw_version;
+ u32 hw_version;
+} __packed;
+
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
+#define GS_CAN_MODE_LOOP_BACK (1<<1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
+#define GS_CAN_MODE_ONE_SHOT (1<<3)
+
+struct gs_device_mode {
+ u32 mode;
+ u32 flags;
+} __packed;
+
+struct gs_device_state {
+ u32 state;
+ u32 rxerr;
+ u32 txerr;
+} __packed;
+
+struct gs_device_bittiming {
+ u32 prop_seg;
+ u32 phase_seg1;
+ u32 phase_seg2;
+ u32 sjw;
+ u32 brp;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
+#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
+#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
+
+struct gs_device_bt_const {
+ u32 feature;
+ u32 fclk_can;
+ u32 tseg1_min;
+ u32 tseg1_max;
+ u32 tseg2_min;
+ u32 tseg2_max;
+ u32 sjw_max;
+ u32 brp_min;
+ u32 brp_max;
+ u32 brp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW 1
+
+struct gs_host_frame {
+ u32 echo_id;
+ u32 can_id;
+
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+
+ u8 data[8];
+} __packed;
+/* The GS USB devices make use of the same flags and masks as in
+ * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
+ */
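+
Because struct gs_host_frame is the wire format shared with the firmware, its layout is fixed: 4 + 4 + 1 + 1 + 1 + 1 + 8 = 20 bytes. A compile-time guard along these lines (a sketch, not in the patch) would pin that down:

	static inline void gs_host_frame_size_check(void)
	{
		/* __packed above guarantees there is no padding */
		BUILD_BUG_ON(sizeof(struct gs_host_frame) != 20);
	}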
+
+/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
+#define GS_MAX_TX_URBS 10
+/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
+#define GS_MAX_RX_URBS 30
+/* Maximum number of interfaces the driver supports per device.
+ * Current hardware only supports 2 interfaces; future hardware may differ.
+ */
+#define GS_MAX_INTF 2
+
+struct gs_tx_context {
+ struct gs_can *dev;
+ unsigned int echo_id;
+};
+
+struct gs_can {
+ struct can_priv can; /* must be the first member */
+
+ struct gs_usb *parent;
+
+ struct net_device *netdev;
+ struct usb_device *udev;
+ struct usb_interface *iface;
+
+ struct can_bittiming_const bt_const;
+ unsigned int channel; /* channel number */
+
+ /* This lock prevents a race condition between xmit and receive. */
+ spinlock_t tx_ctx_lock;
+ struct gs_tx_context tx_context[GS_MAX_TX_URBS];
+
+ struct usb_anchor tx_submitted;
+ atomic_t active_tx_urbs;
+};
+
+/* usb interface struct */
+struct gs_usb {
+ struct gs_can *canch[GS_MAX_INTF];
+ struct usb_anchor rx_submitted;
+ atomic_t active_channels;
+ struct usb_device *udev;
+};
+
+/* 'allocate' a tx context.
+ * returns a valid tx context or NULL if there is no space.
+ */
+static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
+{
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+
+ for (; i < GS_MAX_TX_URBS; i++) {
+ if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
+ dev->tx_context[i].echo_id = i;
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return &dev->tx_context[i];
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return NULL;
+}
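+
The pool invariant: a free slot holds the out-of-range sentinel GS_MAX_TX_URBS, while a busy slot holds its own index, so an echo id coming back from the device can be validated with a single equality check in gs_get_tx_context() below. Expressed as a hypothetical checker (sketch only):

	static bool gs_tx_slot_busy(const struct gs_can *dev, unsigned int i)
	{
		/* only meaningful while dev->tx_ctx_lock is held */
		return dev->tx_context[i].echo_id == i;
	}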
+
+/* releases a tx context
+ */
+static void gs_free_tx_context(struct gs_tx_context *txc)
+{
+ txc->echo_id = GS_MAX_TX_URBS;
+}
+
+/* Get a tx context by id.
+ */
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+{
+ unsigned long flags;
+
+ if (id < GS_MAX_TX_URBS) {
+ spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+ if (dev->tx_context[id].echo_id == id) {
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return &dev->tx_context[id];
+ }
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ }
+ return NULL;
+}
+
+static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+{
+ struct gs_device_mode *dm;
+ struct usb_interface *intf = gsdev->iface;
+ int rc;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return -ENOMEM;
+
+ dm->mode = GS_CAN_MODE_RESET;
+
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ gsdev->channel,
+ 0,
+ dm,
+ sizeof(*dm),
+ 1000);
+
+ return rc;
+}
+
+static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
+{
+ struct can_device_stats *can_stats = &dev->can.can_stats;
+
+ if (cf->can_id & CAN_ERR_RESTARTED) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ can_stats->restarts++;
+ } else if (cf->can_id & CAN_ERR_BUSOFF) {
+ dev->can.state = CAN_STATE_BUS_OFF;
+ can_stats->bus_off++;
+ } else if (cf->can_id & CAN_ERR_CRTL) {
+ if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
+ (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
+ dev->can.state = CAN_STATE_ERROR_WARNING;
+ can_stats->error_warning++;
+ } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
+ (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
+ dev->can.state = CAN_STATE_ERROR_PASSIVE;
+ can_stats->error_passive++;
+ } else {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+}
+
+static void gs_usb_recieve_bulk_callback(struct urb *urb)
+{
+ struct gs_usb *usbcan = urb->context;
+ struct gs_can *dev;
+ struct net_device *netdev;
+ int rc;
+ struct net_device_stats *stats;
+ struct gs_host_frame *hf = urb->transfer_buffer;
+ struct gs_tx_context *txc;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ BUG_ON(!usbcan);
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+ /* do not resubmit aborted urbs, e.g. when the device goes down */
+ return;
+ }
+
+ /* device reports out of range channel id */
+ if (hf->channel >= GS_MAX_INTF)
+ goto resubmit_urb;
+
+ dev = usbcan->canch[hf->channel];
+
+ netdev = dev->netdev;
+ stats = &netdev->stats;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (hf->echo_id == -1) { /* normal rx */
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = hf->can_id;
+
+ cf->can_dlc = get_can_dlc(hf->can_dlc);
+ memcpy(cf->data, hf->data, 8);
+
+ /* ERROR frames tell us information about the controller */
+ if (hf->can_id & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += hf->can_dlc;
+
+ netif_rx(skb);
+ } else { /* echo frame for a locally transmitted skb */
+ if (hf->echo_id >= GS_MAX_TX_URBS) {
+ netdev_err(netdev,
+ "Unexpected out of range echo id %d\n",
+ hf->echo_id);
+ goto resubmit_urb;
+ }
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += hf->can_dlc;
+
+ txc = gs_get_tx_context(dev, hf->echo_id);
+
+ /* bad devices send bad echo_ids. */
+ if (!txc) {
+ netdev_err(netdev,
+ "Unexpected unused echo id %d\n",
+ hf->echo_id);
+ goto resubmit_urb;
+ }
+
+ can_get_echo_skb(netdev, hf->echo_id);
+
+ gs_free_tx_context(txc);
+
+ netif_wake_queue(netdev);
+ }
+
+ if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb)
+ goto resubmit_urb;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->can_dlc = CAN_ERR_DLC;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ netif_rx(skb);
+ }
+
+ resubmit_urb:
+ usb_fill_bulk_urb(urb,
+ usbcan->udev,
+ usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+ hf,
+ sizeof(struct gs_host_frame),
+ gs_usb_recieve_bulk_callback,
+ usbcan
+ );
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* on USB failure, take down all interfaces */
+ if (rc == -ENODEV) {
+ for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ if (usbcan->canch[rc])
+ netif_device_detach(usbcan->canch[rc]->netdev);
+ }
+ }
+}
+
+static int gs_usb_set_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+ struct usb_interface *intf = dev->iface;
+ int rc;
+ struct gs_device_bittiming *dbt;
+
+ dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+ if (!dbt)
+ return -ENOMEM;
+
+ dbt->prop_seg = bt->prop_seg;
+ dbt->phase_seg1 = bt->phase_seg1;
+ dbt->phase_seg2 = bt->phase_seg2;
+ dbt->sjw = bt->sjw;
+ dbt->brp = bt->brp;
+
+ /* request bit timings */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ dbt,
+ sizeof(*dbt),
+ 1000);
+
+ kfree(dbt);
+
+ if (rc < 0)
+ dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
+ rc);
+
+ return rc;
+}
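+
Note the kmalloc()/kfree() bracketing usb_control_msg() here and in the other control transfers: USB transfer buffers must be heap-allocated so they are DMA-capable; stack buffers are not allowed. The pattern in isolation, with a hypothetical payload struct (sketch only; assumes <linux/slab.h> and <linux/usb.h>):

	struct foo_cfg {
		u32 value;
	} __packed;

	static int foo_send_cfg(struct usb_device *udev, u32 value)
	{
		struct foo_cfg *cfg;
		int rc;

		cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);	/* DMA-capable */
		if (!cfg)
			return -ENOMEM;
		cfg->value = value;

		rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				     0x01,	/* hypothetical bRequest */
				     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				     0, 0, cfg, sizeof(*cfg), 1000);
		kfree(cfg);
		return rc < 0 ? rc : 0;
	}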
+
+static void gs_usb_xmit_callback(struct urb *urb)
+{
+ struct gs_tx_context *txc = urb->context;
+ struct gs_can *dev = txc->dev;
+ struct net_device *netdev = dev->netdev;
+
+ if (urb->status)
+ netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+
+ usb_free_coherent(urb->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct net_device_stats *stats = &dev->netdev->stats;
+ struct urb *urb;
+ struct gs_host_frame *hf;
+ struct can_frame *cf;
+ int rc;
+ unsigned int idx;
+ struct gs_tx_context *txc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* find an empty context to keep track of transmission */
+ txc = gs_alloc_tx_context(dev);
+ if (!txc)
+ return NETDEV_TX_BUSY;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URB\n");
+ goto nomem_urb;
+ }
+
+ hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!hf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ goto nomem_hf;
+ }
+
+ idx = txc->echo_id;
+
+ if (idx >= GS_MAX_TX_URBS) {
+ netdev_err(netdev, "Invalid tx context %d\n", idx);
+ goto badidx;
+ }
+
+ hf->echo_id = idx;
+ hf->channel = dev->channel;
+
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cf->can_id;
+ hf->can_dlc = cf->can_dlc;
+ memcpy(hf->data, cf->data, cf->can_dlc);
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+ hf,
+ sizeof(*hf),
+ gs_usb_xmit_callback,
+ txc);
+
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, idx);
+
+ atomic_inc(&dev->active_tx_urbs);
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc)) { /* usb send failed */
+ atomic_dec(&dev->active_tx_urbs);
+
+ can_free_echo_skb(netdev, idx);
+ gs_free_tx_context(txc);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ sizeof(*hf),
+ hf,
+ urb->transfer_dma);
+
+
+ if (rc == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
+ stats->tx_dropped++;
+ }
+ } else {
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
+ netif_stop_queue(netdev);
+ }
+
+ /* let usb core take care of this urb */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+ badidx:
+ usb_free_coherent(dev->udev,
+ sizeof(*hf),
+ hf,
+ urb->transfer_dma);
+ nomem_hf:
+ usb_free_urb(urb);
+
+ nomem_urb:
+ gs_free_tx_context(txc);
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static int gs_can_open(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_usb *parent = dev->parent;
+ int rc, i;
+ struct gs_device_mode *dm;
+ u32 ctrlmode;
+
+ rc = open_candev(netdev);
+ if (rc)
+ return rc;
+
+ if (atomic_add_return(1, &parent->active_channels) == 1) {
+ for (i = 0; i < GS_MAX_RX_URBS; i++) {
+ struct urb *urb;
+ u8 *buf;
+
+ /* alloc rx urb */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev,
+ "No memory left for URB\n");
+ return -ENOMEM;
+ }
+
+ /* alloc rx buffer */
+ buf = usb_alloc_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ /* fill, anchor, and submit rx urb */
+ usb_fill_bulk_urb(urb,
+ dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ GSUSB_ENDPOINT_IN),
+ buf,
+ sizeof(struct gs_host_frame),
+ gs_usb_recieve_bulk_callback,
+ parent);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &parent->rx_submitted);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+ if (rc) {
+ if (rc == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ netdev_err(netdev,
+ "usb_submit failed (err=%d)\n",
+ rc);
+
+ usb_unanchor_urb(urb);
+ break;
+ }
+
+ /* Drop reference,
+ * USB core will take care of freeing it
+ */
+ usb_free_urb(urb);
+ }
+ }
+
+ dm = kmalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return -ENOMEM;
+
+ /* flags */
+ ctrlmode = dev->can.ctrlmode;
+ dm->flags = 0;
+
+ if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ dm->flags |= GS_CAN_MODE_LOOP_BACK;
+ else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+
+ /* Controller is not allowed to retry TX;
+ * this mode is unavailable on Atmel's UC3C hardware
+ */
+ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ dm->flags |= GS_CAN_MODE_ONE_SHOT;
+
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+ /* finally start device */
+ dm->mode = GS_CAN_MODE_START;
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ dm,
+ sizeof(*dm),
+ 1000);
+
+ if (rc < 0) {
+ netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+ kfree(dm);
+ return rc;
+ }
+
+ kfree(dm);
+
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static int gs_can_close(struct net_device *netdev)
+{
+ int rc;
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_usb *parent = dev->parent;
+
+ netif_stop_queue(netdev);
+
+ /* Stop polling */
+ if (atomic_dec_and_test(&parent->active_channels))
+ usb_kill_anchored_urbs(&parent->rx_submitted);
+
+ /* Stop sending URBs */
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ /* reset the device */
+ rc = gs_cmd_reset(parent, dev);
+ if (rc < 0)
+ netdev_warn(netdev, "Couldn't shut down device (err=%d)", rc);
+
+ /* reset tx contexts */
+ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+ dev->tx_context[rc].dev = dev;
+ dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+ }
+
+ /* close the netdev */
+ close_candev(netdev);
+
+ return 0;
+}
+
+static const struct net_device_ops gs_usb_netdev_ops = {
+ .ndo_open = gs_can_open,
+ .ndo_stop = gs_can_close,
+ .ndo_start_xmit = gs_can_start_xmit,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+{
+ struct gs_can *dev;
+ struct net_device *netdev;
+ int rc;
+ struct gs_device_bt_const *bt_const;
+
+ bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+ if (!bt_const)
+ return ERR_PTR(-ENOMEM);
+
+ /* fetch bit timing constants */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ channel,
+ 0,
+ bt_const,
+ sizeof(*bt_const),
+ 1000);
+
+ if (rc < 0) {
+ dev_err(&intf->dev,
+ "Couldn't get bit timing const for channel (err=%d)\n",
+ rc);
+ kfree(bt_const);
+ return ERR_PTR(rc);
+ }
+
+ /* create netdev */
+ netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't allocate candev\n");
+ kfree(bt_const);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = netdev_priv(netdev);
+
+ netdev->netdev_ops = &gs_usb_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+
+ /* dev setup */
+ strcpy(dev->bt_const.name, "gs_usb");
+ dev->bt_const.tseg1_min = bt_const->tseg1_min;
+ dev->bt_const.tseg1_max = bt_const->tseg1_max;
+ dev->bt_const.tseg2_min = bt_const->tseg2_min;
+ dev->bt_const.tseg2_max = bt_const->tseg2_max;
+ dev->bt_const.sjw_max = bt_const->sjw_max;
+ dev->bt_const.brp_min = bt_const->brp_min;
+ dev->bt_const.brp_max = bt_const->brp_max;
+ dev->bt_const.brp_inc = bt_const->brp_inc;
+
+ dev->udev = interface_to_usbdev(intf);
+ dev->iface = intf;
+ dev->netdev = netdev;
+ dev->channel = channel;
+
+ init_usb_anchor(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+ spin_lock_init(&dev->tx_ctx_lock);
+ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+ dev->tx_context[rc].dev = dev;
+ dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+ }
+
+ /* can setup */
+ dev->can.state = CAN_STATE_STOPPED;
+ dev->can.clock.freq = bt_const->fclk_can;
+ dev->can.bittiming_const = &dev->bt_const;
+ dev->can.do_set_bittiming = gs_usb_set_bittiming;
+
+ dev->can.ctrlmode_supported = 0;
+
+ if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+
+ if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+ kfree(bt_const);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ rc = register_candev(dev->netdev);
+ if (rc) {
+ free_candev(dev->netdev);
+ dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ return dev;
+}
+
+static void gs_destroy_candev(struct gs_can *dev)
+{
+ unregister_candev(dev->netdev);
+ free_candev(dev->netdev);
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ kfree(dev);
+}
+
+static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct gs_usb *dev;
+ int rc = -ENOMEM;
+ unsigned int icount, i;
+ struct gs_host_config *hconf;
+ struct gs_device_config *dconf;
+
+ hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
+ if (!hconf)
+ return -ENOMEM;
+
+ hconf->byte_order = 0x0000beef;
+
+ /* send host config */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ hconf,
+ sizeof(*hconf),
+ 1000);
+
+ kfree(hconf);
+
+ if (rc < 0) {
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
+ rc);
+ return rc;
+ }
+
+ dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
+ if (!dconf)
+ return -ENOMEM;
+
+ /* read device config */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ dconf,
+ sizeof(*dconf),
+ 1000);
+ if (rc < 0) {
+ dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
+ rc);
+
+ kfree(dconf);
+
+ return rc;
+ }
+
+ icount = dconf->icount+1;
+
+ kfree(dconf);
+
+ dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+
+ if (icount > GS_MAX_INTF) {
+ dev_err(&intf->dev,
+ "Driver cannot handle more than %d CAN interfaces\n",
+ GS_MAX_INTF);
+ return -EINVAL;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ atomic_set(&dev->active_channels, 0);
+
+ usb_set_intfdata(intf, dev);
+ dev->udev = interface_to_usbdev(intf);
+
+ for (i = 0; i < icount; i++) {
+ dev->canch[i] = gs_make_candev(i, intf);
+ if (IS_ERR_OR_NULL(dev->canch[i])) {
+ /* save the real error; rc still holds the (positive)
+ * byte count of the last successful usb_control_msg()
+ */
+ rc = dev->canch[i] ? PTR_ERR(dev->canch[i]) : -ENOMEM;
+
+ /* on failure destroy previously created candevs */
+ icount = i;
+ for (i = 0; i < icount; i++) {
+ gs_destroy_candev(dev->canch[i]);
+ dev->canch[i] = NULL;
+ }
+ kfree(dev);
+ return rc;
+ }
+ dev->canch[i]->parent = dev;
+ }
+
+ return 0;
+}
+
+static void gs_usb_disconnect(struct usb_interface *intf)
+{
+ unsigned i;
+ struct gs_usb *dev = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+
+ if (!dev) {
+ dev_err(&intf->dev, "Disconnect (nodata)\n");
+ return;
+ }
+
+ for (i = 0; i < GS_MAX_INTF; i++) {
+ struct gs_can *can = dev->canch[i];
+
+ if (!can)
+ continue;
+
+ gs_destroy_candev(can);
+ }
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+}
+
+static const struct usb_device_id gs_usb_table[] = {
+ {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gs_usb_table);
+
+static struct usb_driver gs_usb_driver = {
+ .name = "gs_usb",
+ .probe = gs_usb_probe,
+ .disconnect = gs_usb_disconnect,
+ .id_table = gs_usb_table,
+};
+
+module_usb_driver(gs_usb_driver);
+
+MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
+MODULE_DESCRIPTION(
+"Socket CAN device driver for Geschwister Schneider Technologie-, "
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4ca46edc061d..541fb7a05625 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -53,6 +53,8 @@
#define USB_OEM_MERCURY_PRODUCT_ID 34
#define USB_OEM_LEAF_PRODUCT_ID 35
#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
/* USB devices features */
#define KVASER_HAS_SILENT_MODE BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
.driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
void *buf;
int actual_len;
int err;
- int pos = 0;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
+ do {
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, RX_BUFFER_SIZE, &actual_len,
+ USB_RECV_TIMEOUT);
+ if (err < 0)
+ goto end;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
+ pos = 0;
+ while (pos <= actual_len - MSG_HEADER_LEN) {
+ tmp = buf + pos;
- if (!tmp->len)
- break;
+ if (!tmp->len)
+ break;
- if (pos + tmp->len > actual_len) {
- dev_err(dev->udev->dev.parent, "Format error\n");
- break;
- }
+ if (pos + tmp->len > actual_len) {
+ dev_err(dev->udev->dev.parent,
+ "Format error\n");
+ break;
+ }
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
+ if (tmp->id == id) {
+ memcpy(msg, tmp, tmp->len);
+ goto end;
+ }
- pos += tmp->len;
- }
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
err = -EINVAL;
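
The kvaser_usb_wait_msg() rework above replaces a single bulk read with a jiffies-deadline loop, so unrelated messages read first no longer cause a premature -EINVAL. The generic form of the idiom (hypothetical helpers, sketch only):

	static int foo_wait_for_msg(void)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(100);
		int err;

		do {
			err = foo_receive_once();	/* hypothetical */
			if (err < 0)
				return err;		/* hard error: stop early */
			if (foo_found_expected_msg())	/* hypothetical */
				return 0;
		} while (time_before(jiffies, deadline));

		return -ETIMEDOUT;
	}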
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
new file mode 100644
index 000000000000..5e8b5609c067
--- /dev/null
+++ b/drivers/net/can/xilinx_can.c
@@ -0,0 +1,1208 @@
+/* Xilinx CAN device driver
+ *
+ * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2009 PetaLogix. All rights reserved.
+ *
+ * Description:
+ * This driver is developed for the AXI CAN IP and for the Zynq CANPS controller.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+
+#define DRIVER_NAME "xilinx_can"
+
+/* CAN registers set */
+enum xcan_reg {
+ XCAN_SRR_OFFSET = 0x00, /* Software reset */
+ XCAN_MSR_OFFSET = 0x04, /* Mode select */
+ XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
+ XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
+ XCAN_ECR_OFFSET = 0x10, /* Error counter */
+ XCAN_ESR_OFFSET = 0x14, /* Error status */
+ XCAN_SR_OFFSET = 0x18, /* Status */
+ XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
+ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
+ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
+ XCAN_TXFIFO_ID_OFFSET = 0x30, /* TX FIFO ID */
+ XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
+ XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
+ XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
+ XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
+ XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
+ XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
+ XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
+};
+
+/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
+#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
+#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
+#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
+#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
+#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
+#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
+#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
+#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
+#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
+#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
+#define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
+#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
+#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
+#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
+#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
+#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
+#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
+#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
+#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
+#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
+#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
+#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
+#define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
+#define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
+#define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
+#define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
+#define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
+#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
+#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
+#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
+#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
+#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
+#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
+#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
+#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
+
+#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
+ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
+ XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
+ XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+
+/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
+#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
+#define XCAN_IDR_ID1_SHIFT 21 /* Standard Message Identifier */
+#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
+#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
+#define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
+
+/* CAN frame length constants */
+#define XCAN_FRAME_MAX_DATA_LEN 8
+#define XCAN_TIMEOUT (1 * HZ)
+
+/**
+ * struct xcan_priv - This definition defines the CAN driver instance
+ * @can: CAN private data structure.
+ * @tx_head: Tx CAN packets ready to be sent on the queue
+ * @tx_tail: Tx CAN packets successfully sent on the queue
+ * @tx_max: Maximum number of packets the driver can send
+ * @napi: NAPI structure
+ * @read_reg: For reading data from CAN registers
+ * @write_reg: For writing data to CAN registers
+ * @dev: Network device data structure
+ * @reg_base: Ioremapped address to registers
+ * @irq_flags: For request_irq()
+ * @bus_clk: Pointer to struct clk
+ * @can_clk: Pointer to struct clk
+ */
+struct xcan_priv {
+ struct can_priv can;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ unsigned int tx_max;
+ struct napi_struct napi;
+ u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
+ void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
+ u32 val);
+ struct net_device *dev;
+ void __iomem *reg_base;
+ unsigned long irq_flags;
+ struct clk *bus_clk;
+ struct clk *can_clk;
+};
+
+/* CAN Bittiming constants as per Xilinx CAN specs */
+static const struct can_bittiming_const xcan_bittiming_const = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/**
+ * xcan_write_reg_le - Write a value to the device register little endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ * @val: Value to write at the Register offset
+ *
+ * Write data to the particular CAN register
+ */
+static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
+ u32 val)
+{
+ iowrite32(val, priv->reg_base + reg);
+}
+
+/**
+ * xcan_read_reg_le - Read a value from the device register little endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ *
+ * Read data from the particular CAN register
+ * Return: value read from the CAN register
+ */
+static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
+{
+ return ioread32(priv->reg_base + reg);
+}
+
+/**
+ * xcan_write_reg_be - Write a value to the device register big endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ * @val: Value to write at the Register offset
+ *
+ * Write data to the particular CAN register
+ */
+static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
+ u32 val)
+{
+ iowrite32be(val, priv->reg_base + reg);
+}
+
+/**
+ * xcan_read_reg_be - Read a value from the device register big endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ *
+ * Read data from the particular CAN register
+ * Return: value read from the CAN register
+ */
+static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
+{
+ return ioread32be(priv->reg_base + reg);
+}
+
+/**
+ * set_reset_mode - Resets the CAN device mode
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver reset mode routine. The driver
+ * enters configuration mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int set_reset_mode(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long timeout;
+
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+
+ timeout = jiffies + XCAN_TIMEOUT;
+ while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(ndev, "timed out waiting for config mode\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(500, 10000);
+ }
+
+ return 0;
+}
+
+/**
+ * xcan_set_bittiming - CAN set bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver set bittiming routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_set_bittiming(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 btr0, btr1;
+ u32 is_config_mode;
+
+ /* Check whether Xilinx CAN is in configuration mode.
+ * It cannot set bit timing if Xilinx CAN is not in configuration mode.
+ */
+ is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_CONFIG_MASK;
+ if (!is_config_mode) {
+ netdev_alert(ndev,
+ "BUG! Cannot set bittiming - CAN is not in config mode\n");
+ return -EPERM;
+ }
+
+ /* Setting Baud Rate prescaler value in BRPR Register */
+ btr0 = (bt->brp - 1);
+
+ /* Setting Time Segment 1 in BTR Register */
+ btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
+
+ /* Setting Time Segment 2 in BTR Register */
+ btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
+
+ /* Setting Synchronous jump width in BTR Register */
+ btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
+
+ priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
+ priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
+
+ netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
+ priv->read_reg(priv, XCAN_BRPR_OFFSET),
+ priv->read_reg(priv, XCAN_BTR_OFFSET));
+
+ return 0;
+}
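+
A worked example of the packing above, with hypothetical timing values chosen only for the arithmetic: brp = 4, prop_seg = 5, phase_seg1 = 6, phase_seg2 = 4 and sjw = 2 give

	BRPR = brp - 1                      = 3
	BTR  = (prop_seg + phase_seg1 - 1)  = 0x0A   (TS1, mask 0x0F)
	     | (phase_seg2 - 1) << 4        = 0x30   (TS2, mask 0x70)
	     | (sjw - 1) << 7               = 0x80   (SJW, mask 0x180)
	                                    = 0xBA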
+
+/**
+ * xcan_chip_start - This is the driver's start routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's start routine.
+ * Based on the state of the CAN device, it puts
+ * the CAN device into the proper mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_chip_start(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 err, reg_msr, reg_sr_mask;
+ unsigned long timeout;
+
+ /* Check if it is in reset mode */
+ err = set_reset_mode(ndev);
+ if (err < 0)
+ return err;
+
+ err = xcan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ /* Enable interrupts */
+ priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
+
+ /* Check whether it is loopback mode or normal mode */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ reg_msr = XCAN_MSR_LBACK_MASK;
+ reg_sr_mask = XCAN_SR_LBACK_MASK;
+ } else {
+ reg_msr = 0x0;
+ reg_sr_mask = XCAN_SR_NORMAL_MASK;
+ }
+
+ priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
+
+ timeout = jiffies + XCAN_TIMEOUT;
+ while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(ndev,
+ "timed out waiting for correct mode\n");
+ return -ETIMEDOUT;
+ }
+ }
+ netdev_dbg(ndev, "status:0x%08x\n",
+ priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+}
+
+/**
+ * xcan_do_set_mode - This sets the mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: The mode to be set
+ *
+ * This checks the driver state and calls the
+ * corresponding routine to set the requested mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = xcan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "xcan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xcan_start_xmit - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from upper layers to initiate transmission. This
+ * function uses the next available free txbuff and populates its fields to
+ * start the transmission.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the TX FIFO is full
+ */
+static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 id, dlc, data[2] = {0, 0};
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ /* Check if the TX buffer is full */
+ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_TXFLL_MASK)) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG! TX FIFO full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Watch the ID register bit layout carefully */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ /* Extended CAN ID format */
+ id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
+ XCAN_IDR_ID2_MASK;
+ id |= (((cf->can_id & CAN_EFF_MASK) >>
+ (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
+ XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
+
+ /* The substitute remote TX request bit should be "1"
+ * for extended frames as in the Xilinx CAN datasheet
+ */
+ id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ /* Extended frames remote TX request */
+ id |= XCAN_IDR_RTR_MASK;
+ } else {
+ /* Standard CAN ID format */
+ id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
+ XCAN_IDR_ID1_MASK;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ /* Standard frames remote TX request */
+ id |= XCAN_IDR_SRR_MASK;
+ }
+
+ dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
+
+ if (cf->can_dlc > 0)
+ data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
+ if (cf->can_dlc > 4)
+ data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+ priv->tx_head++;
+
+ /* Write the Frame to Xilinx CAN TX FIFO */
+ priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
+ /* If the CAN frame is an RTR frame, this write triggers transmission */
+ priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
+ /* If the CAN frame is a Standard/Extended frame, this
+ * write triggers transmission
+ */
+ priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
+ stats->tx_bytes += cf->can_dlc;
+ }
+
+ /* Check if the TX buffer is full */
+ if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
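+
tx_head and tx_tail form a free-running ring: their difference is the number of frames in flight, head % tx_max selects the echo-skb slot, and the queue is stopped once the difference reaches tx_max. In sketch form:

	static inline unsigned int xcan_tx_in_flight(const struct xcan_priv *priv)
	{
		return priv->tx_head - priv->tx_tail;	/* unsigned difference wraps correctly */
	}

	static inline bool xcan_tx_full(const struct xcan_priv *priv)
	{
		return xcan_tx_in_flight(priv) == priv->tx_max;
	}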
+
+/**
+ * xcan_rx - Called from the CAN ISR to complete the received
+ * frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
+ * does minimal processing and invokes "netif_receive_skb" to complete further
+ * processing.
+ * Return: 1 on success and 0 on failure.
+ */
+static int xcan_rx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 id_xcan, dlc, data[2] = {0, 0};
+
+ skb = alloc_can_skb(ndev, &cf);
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ /* Read a frame from the Xilinx Zynq CANPS */
+ id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
+ dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
+ XCAN_DLCR_DLC_SHIFT;
+
+ /* Change Xilinx CAN data length format to socketCAN data format */
+ cf->can_dlc = get_can_dlc(dlc);
+
+ /* Change Xilinx CAN ID format to socketCAN ID format */
+ if (id_xcan & XCAN_IDR_IDE_MASK) {
+ /* The received frame is an Extended format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
+ cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
+ XCAN_IDR_ID2_SHIFT;
+ cf->can_id |= CAN_EFF_FLAG;
+ if (id_xcan & XCAN_IDR_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ /* The received frame is a standard format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
+ XCAN_IDR_ID1_SHIFT;
+ if (id_xcan & XCAN_IDR_SRR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
+ data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
+ data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+
+ /* Change Xilinx CAN data format to socketCAN data format */
+ if (cf->can_dlc > 0)
+ *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
+ if (cf->can_dlc > 4)
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
+ }
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * xcan_err_interrupt - error frame Isr
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt; it checks
+ * the type of error and forwards the error
+ * frame to upper layers.
+ */
+static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 err_status, status, txerr = 0, rxerr = 0;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
+ priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
+ txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+ rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+ XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+ status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if (isr & XCAN_IXR_BSOFF_MASK) {
+ priv->can.state = CAN_STATE_BUS_OFF;
+ priv->can.can_stats.bus_off++;
+ /* Leave device in Config Mode in bus-off state */
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ } else if (status & XCAN_SR_ERRWRN_MASK) {
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+
+ /* Check for Arbitration lost interrupt */
+ if (isr & XCAN_IXR_ARBLST_MASK) {
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for RX FIFO Overflow interrupt */
+ if (isr & XCAN_IXR_RXOFLW_MASK) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+
+ /* Check for error interrupt */
+ if (isr & XCAN_IXR_ERROR_MASK) {
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ }
+
+ /* Check for Ack error interrupt */
+ if (err_status & XCAN_ESR_ACKER_MASK) {
+ stats->tx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ }
+
+ /* Check for Bit error interrupt */
+ if (err_status & XCAN_ESR_BERR_MASK) {
+ stats->tx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_BIT;
+ }
+ }
+
+ /* Check for Stuff error interrupt */
+ if (err_status & XCAN_ESR_STER_MASK) {
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_STUFF;
+ }
+ }
+
+ /* Check for Form error interrupt */
+ if (err_status & XCAN_ESR_FMER_MASK) {
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_FORM;
+ }
+ }
+
+ /* Check for CRC error interrupt */
+ if (err_status & XCAN_ESR_CRCER_MASK) {
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ }
+ }
+ priv->can.can_stats.bus_error++;
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ netdev_dbg(ndev, "%s: error status register:0x%x\n",
+ __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
+}
+
+/**
+ * xcan_state_interrupt - Checks the state of the CAN device
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This checks the state of the CAN device
+ * and puts the device into the appropriate state.
+ */
+static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ /* Check for Sleep interrupt; if set, put CAN device in sleep state */
+ if (isr & XCAN_IXR_SLP_MASK)
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ /* Check for Wake up interrupt; if set, put CAN device in active state */
+ if (isr & XCAN_IXR_WKUP_MASK)
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_rx_poll - Poll routine for rx packets (NAPI)
+ * @napi: napi structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the rx part.
+ * It will process packets up to the maximum quota value.
+ *
+ * Return: number of packets received
+ */
+static int xcan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr, ier;
+ int work_done = 0;
+
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
+ if (isr & XCAN_IXR_RXOK_MASK) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_RXOK_MASK);
+ work_done += xcan_rx(ndev);
+ } else {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_RXNEMP_MASK);
+ break;
+ }
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+
+ if (work_done)
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
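+ /* Under quota means the RX FIFO has drained: complete NAPI and
+ * re-enable the receive interrupts that the hard IRQ masked off.
+ */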
+ if (work_done < quota) {
+ napi_complete(napi);
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
+ return work_done;
+}
+
+/**
+ * xcan_tx_interrupt - Tx Done ISR
+ * @ndev: net_device pointer
+ * @isr: Interrupt status register value
+ */
+static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ while ((priv->tx_head - priv->tx_tail > 0) &&
+ (isr & XCAN_IXR_TXOK_MASK)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ can_get_echo_skb(ndev, priv->tx_tail %
+ priv->tx_max);
+ priv->tx_tail++;
+ stats->tx_packets++;
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ netif_wake_queue(ndev);
+}
+
+/**
+ * xcan_interrupt - CAN ISR
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the Xilinx CAN ISR. It checks the type of interrupt
+ * and invokes the corresponding handler.
+ *
+ * Return:
+ * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
+ */
+static irqreturn_t xcan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr, ier;
+
+ /* Get the interrupt status from Xilinx CAN */
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ if (!isr)
+ return IRQ_NONE;
+
+ /* Check for the type of interrupt and process it */
+ if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
+ XCAN_IXR_WKUP_MASK));
+ xcan_state_interrupt(ndev, isr);
+ }
+
+ /* Check for Tx interrupt and process it */
+ if (isr & XCAN_IXR_TXOK_MASK)
+ xcan_tx_interrupt(ndev, isr);
+
+ /* Check for the type of error interrupt and process it */
+ if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
+ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
+ XCAN_IXR_ARBLST_MASK));
+ xcan_err_interrupt(ndev, isr);
+ }
+
+ /* Check for the type of receive interrupt and process it */
+ if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ napi_schedule(&priv->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xcan_chip_stop - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It will disable the
+ * interrupts and put the device into configuration mode.
+ */
+static void xcan_chip_stop(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ier;
+
+ /* Disable interrupts and leave the CAN device in configuration mode */
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier &= ~XCAN_INTR_ALL;
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * xcan_open - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_open(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
+ ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->can_clk);
+ if (ret) {
+ netdev_err(ndev, "unable to enable device clock\n");
+ goto err_irq;
+ }
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret) {
+ netdev_err(ndev, "unable to enable bus clock\n");
+ goto err_can_clk;
+ }
+
+ /* Set chip into reset mode */
+ ret = set_reset_mode(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "mode resetting failed!\n");
+ goto err_bus_clk;
+ }
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret)
+ goto err_bus_clk;
+
+ ret = xcan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "xcan_chip_start failed!\n");
+ goto err_candev;
+ }
+
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_candev:
+ close_candev(ndev);
+err_bus_clk:
+ clk_disable_unprepare(priv->bus_clk);
+err_can_clk:
+ clk_disable_unprepare(priv->can_clk);
+err_irq:
+ free_irq(ndev->irq, ndev);
+err:
+ return ret;
+}
+
+/**
+ * xcan_close - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int xcan_close(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ xcan_chip_stop(ndev);
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+
+ return 0;
+}
+
+/**
+ * xcan_get_berr_counter - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->can_clk);
+ if (ret)
+ goto err;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret)
+ goto err_clk;
+
+ bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+ bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+ XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(priv->can_clk);
+err:
+ return ret;
+}
+
+
+static const struct net_device_ops xcan_netdev_ops = {
+ .ndo_open = xcan_open,
+ .ndo_stop = xcan_close,
+ .ndo_start_xmit = xcan_start_xmit,
+};
+
+/**
+ * xcan_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused xcan_suspend(struct device *dev)
+{
+ struct platform_device *pdev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->bus_clk);
+ clk_disable(priv->can_clk);
+
+ return 0;
+}
+
+/**
+ * xcan_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused xcan_resume(struct device *dev)
+{
+ struct platform_device *pdev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = clk_enable(priv->bus_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+ ret = clk_enable(priv->can_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ clk_disable_unprepare(priv->bus_clk);
+ return ret;
+ }
+
+ priv->write_reg(priv, XCAN_MSR_OFFSET, 0);
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
+
+/**
+ * xcan_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct net_device *ndev;
+ struct xcan_priv *priv;
+ void __iomem *addr;
+ int ret, rx_max, tx_max;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+ if (ret < 0)
+ goto err;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
+ if (ret < 0)
+ goto err;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->dev = ndev;
+ priv->can.bittiming_const = &xcan_bittiming_const;
+ priv->can.do_set_mode = xcan_do_set_mode;
+ priv->can.do_get_berr_counter = xcan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_BERR_REPORTING;
+ priv->reg_base = addr;
+ priv->tx_max = tx_max;
+
+ /* Get IRQ for the device */
+ ndev->irq = platform_get_irq(pdev, 0);
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &xcan_netdev_ops;
+
+ /* Get the CAN device clock (can_clk) */
+ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(&pdev->dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ /* Check for type of CAN device */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,zynq-can-1.0")) {
+ priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
+ }
+ } else {
+ priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
+ }
+ }
+
+ ret = clk_prepare_enable(priv->can_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable device clock\n");
+ goto err_free;
+ }
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable bus clock\n");
+ goto err_unprepare_disable_dev;
+ }
+
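+ /* Probe the register endianness: the core starts up in configuration
+ * mode, so the status register should read back as CONFIG. If it does
+ * not with little-endian accessors, switch to big-endian access.
+ */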
+ priv->write_reg = xcan_write_reg_le;
+ priv->read_reg = xcan_read_reg_le;
+
+ if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
+ priv->write_reg = xcan_write_reg_be;
+ priv->read_reg = xcan_read_reg_be;
+ }
+
+ priv->can.clock.freq = clk_get_rate(priv->can_clk);
+
+ netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
+ goto err_unprepare_disable_busclk;
+ }
+
+ devm_can_led_init(ndev);
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ priv->tx_max);
+
+ return 0;
+
+err_unprepare_disable_busclk:
+ clk_disable_unprepare(priv->bus_clk);
+err_unprepare_disable_dev:
+ clk_disable_unprepare(priv->can_clk);
+err_free:
+ free_candev(ndev);
+err:
+ return ret;
+}
+
+/**
+ * xcan_remove - Unregister the device after releasing the resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int xcan_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ if (set_reset_mode(ndev) < 0)
+ netdev_err(ndev, "mode resetting failed!\n");
+
+ unregister_candev(ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", },
+ { .compatible = "xlnx,axi-can-1.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
+static struct platform_driver xcan_driver = {
+ .probe = xcan_probe,
+ .remove = xcan_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .pm = &xcan_dev_pm_ops,
+ .of_match_table = xcan_of_match,
+ },
+};
+
+module_platform_driver(xcan_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx CAN interface");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 41ee5b6ae917..69c42513dd72 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index dadfafba64e9..953bc6a49e59 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = REG_PORT(p);
u16 val;
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
static int mv88e6131_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 17314ed9456d..9ce2146346b6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
mutex_init(&ps->ppu_mutex);
INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
int i;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 35df0b9e6848..a968654b631d 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
/* The EL3-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 063557e037f2..f18647c23559 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
dev->netdev_ops = &el3_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
return tc589_config(link);
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 465cc7108d8a..e13b04624ded 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+ dev->ethtool_ops = &typhoon_ethtool_ops;
/* We can handle scatter gather, up to 16 entries, and
* we can do IP checksumming (only version 4, doh...)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c399b52..1d162ccb4733 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
- if (jiffies - reset_start_time > 2 * HZ / 100) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
netdev_warn(dev, "%s: did not complete.\n", __func__);
break;
}
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
- if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ax_reset_8390(dev);
ax_NS8390_init(dev, 1);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 051349458462..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -68,6 +68,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e36c456..58de3339ab3c 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 171d73c1d3c2..40dbbf740331 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 28460676b8ca..d81e7167a8b5 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -736,6 +736,7 @@ static int emac_open(struct net_device *dev)
ret = emac_mdio_probe(dev);
if (ret < 0) {
+ free_irq(dev->irq, dev);
netdev_err(dev, "cannot probe MDIO bus\n");
return ret;
}
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1517e9df5ba1..9a6991be9749 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
dev->watchdog_timeo = 5*HZ;
dev->netdev_ops = &ace_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+ dev->ethtool_ops = &ace_ethtool_ops;
/* we only display this string ONCE */
if (!boards_found)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f451cf..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -353,7 +353,6 @@ static int sgdma_async_read(struct altera_tse_private *priv)
struct sgdma_descrip __iomem *cdesc = &descbase[0];
struct sgdma_descrip __iomem *ndesc = &descbase[1];
-
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25eff7952..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -271,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
void altera_tse_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+ netdev->ethtool_ops = &tse_ethtool_ops;
}
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 562df46e0a82..bbaf36d9f5e1 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -177,4 +177,16 @@ config SUNLANCE
To compile this driver as a module, choose M here: the module
will be called sunlance.
+config AMD_XGBE
+ tristate "AMD 10GbE Ethernet driver"
+ depends on OF_NET
+ select PHYLIB
+ select AMD_XGBE_PHY
+ ---help---
+ This driver supports the AMD 10GbE Ethernet device found on an
+ AMD SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amd-xgbe.
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index cdd4301a973d..a38a2dce3eb3 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NI65) += ni65.o
obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 26efaaa5e73f..068dc7cad5fa 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initialize driver entry points */
dev->netdev_ops = &amd8111e_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
dev->irq =pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index b08101b31b8b..968b7bfac8fc 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
- struct ariadne_private *priv;
u32 serial;
int err;
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
return -ENOMEM;
}
- priv = netdev_priv(dev);
-
r1->name = dev->name;
r2->name = dev->name;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a2bd91e3d302..a78e4c136959 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
dev->base_addr = base->start;
dev->irq = irq;
dev->netdev_ops = &au1000_netdev_ops;
- SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->ethtool_ops = &au1000_ethtool_ops;
dev->watchdog_timeo = ETH_TX_TIMEOUT;
/*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c2c893..6c9de117ffc6 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
#include "hplance.h"
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
* an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
*/
#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399dec054..0660ac5846bb 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
#include <asm/pgtable.h>
#include <asm/mvme147hw.h>
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
* an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
*/
#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->dev_addr);
lp = netdev_priv(dev);
- lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 08569fe2b182..abf3b1581c82 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
dev->netdev_ops = &mace_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return nmclan_config(link);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000000..26cf9af1642f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+
+amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000000..bf462ee86f5c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1007 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define DMA_DSR2 0x3028
+#define DMA_DSR3 0x302c
+#define DMA_DSR4 0x3030
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_TDC_INDEX 24
+#define DMA_AXIAWCR_TDC_WIDTH 4
+#define DMA_AXIAWCR_TDD_INDEX 28
+#define DMA_AXIAWCR_TDD_WIDTH 2
+#define DMA_DSR0_RPS_INDEX 8
+#define DMA_DSR0_RPS_WIDTH 4
+#define DMA_DSR0_TPS_INDEX 12
+#define DMA_DSR0_TPS_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
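+
+/* Illustrative sketch, not part of the original patch: with the scheme
+ * above, a register of DMA channel 'channel' lives at
+ * DMA_CH_BASE + channel * DMA_CH_INC + reg. The helper name
+ * xgbe_dma_ch_addr() is hypothetical.
+ */
+static inline void __iomem *xgbe_dma_ch_addr(void __iomem *xgmac_regs,
+ unsigned int channel,
+ unsigned int reg)
+{
+ return xgmac_regs + DMA_CH_BASE + (channel * DMA_CH_INC) + reg;
+}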
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_HTR1 0x0014
+#define MAC_HTR2 0x0018
+#define MAC_HTR3 0x001c
+#define MAC_HTR4 0x0020
+#define MAC_HTR5 0x0024
+#define MAC_HTR6 0x0028
+#define MAC_HTR7 0x002c
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
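+
+/* Illustrative sketch, not part of the original patch: MTL_RQDCM0R is the
+ * first of a series of queue-to-DMA-channel mapping registers, each covering
+ * MTL_RQDCM_Q_PER_REG (4) queues and spaced MTL_RQDCM_INC (4) bytes apart.
+ * Assuming one byte of mapping per queue (an assumption, not confirmed by
+ * this patch), queue 'q' would be configured via:
+ *
+ * reg = MTL_RQDCM0R + (q / MTL_RQDCM_Q_PER_REG) * MTL_RQDCM_INC;
+ * shift = (q % MTL_RQDCM_Q_PER_REG) * 8;
+ */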
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_TCECR 0x10
+#define MTL_Q_TCESR 0x14
+#define MTL_Q_TCQWR 0x18
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x4c
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_TCQWR_QW_INDEX 0
+#define MTL_Q_TCQWR_QW_WIDTH 21
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RFA_INDEX 8
+#define MTL_Q_RQOMR_RFA_WIDTH 3
+#define MTL_Q_RQOMR_RFD_INDEX 13
+#define MTL_Q_RQOMR_RFD_WIDTH 3
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent traffic class has registers that
+ * are accessed using an offset of 0x80 from the previous traffic class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+
+/* PCS MMD select register offset
+ * The MMD select register is used for accessing PCS registers
+ * when the underlying APB3 interface is using indirect addressing.
+ * Indirect addressing requires accessing registers in two phases,
+ * an address phase and a data phase. The address phase requires
+ * writing an address selection value to the MMD select register.
+ */
+#define PCS_MMD_SELECT 0xff
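+
+/* Illustrative sketch, not part of the original patch: a two-phase indirect
+ * read through the MMD select register. The address phase selects the
+ * 256-register window containing the target; the data phase reads within
+ * that window. The helper name, the (mmd << 16) address encoding and the
+ * 32-bit register stride are assumptions for illustration only.
+ */
+static inline u32 xpcs_indirect_read(void __iomem *pcs_regs,
+ unsigned int mmd, unsigned int reg)
+{
+ unsigned int addr = (mmd << 16) | (reg & 0xffff);
+
+ /* Address phase: write the window selection value */
+ iowrite32(addr >> 8, pcs_regs + (PCS_MMD_SELECT << 2));
+
+ /* Data phase: access the register within the selected window */
+ return ioread32(pcs_regs + ((addr & 0xff) << 2));
+}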
+
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
+ (_var) |= cpu_to_le32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
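+
+/* Worked example (illustrative only, not part of the driver): with
+ * _index = 8 and _width = 3 (the RFA field of MTL_Q_RQOMR above),
+ * SET_BITS builds the mask ((0x1 << 3) - 1) << 8 = 0x700, clears those
+ * bits and ORs in the masked value, so:
+ *
+ *   unsigned int var = 0;
+ *   SET_BITS(var, 8, 3, 2);	// var == 0x200
+ *   GET_BITS(var, 8, 3);	// evaluates to 2
+ *
+ * The _LE variants behave identically but operate on __le32 values.
+ */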
+
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define XGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
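+
+/* Illustrative expansion (not part of the driver): the prefix and field
+ * names are token-pasted onto _INDEX and _WIDTH, so
+ *
+ *   XGMAC_GET_BITS(attr, TX_PACKET_ATTRIBUTES, TSO_ENABLE)
+ *
+ * expands to
+ *
+ *   GET_BITS(attr, TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX,
+ *	      TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH)
+ *
+ * i.e. GET_BITS(attr, 1, 1) with the definitions above.
+ */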
+
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XGMAC_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
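+
+/* Illustrative usage (not part of the driver): XGMAC_IOWRITE_BITS is a
+ * read-modify-write, so updating one field preserves the rest of the
+ * register, e.g.
+ *
+ *   XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+ *
+ * reads MAC_RFCR, sets only the RFE field and writes the result back.
+ */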
+
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ ioread32((_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
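+
+/* Illustrative address calculation (not part of the driver): the queue
+ * register address is MTL_Q_BASE + (_n * MTL_Q_INC) + _reg, so with the
+ * 0x1100 base and 0x80 increment noted above, MTL_Q_ISR (0x74) of
+ * queue 2 resolves to 0x1100 + 2 * 0x80 + 0x74 = 0x1274.
+ */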
+
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define XGMAC_DMA_IOREAD(_channel, _reg) \
+ ioread32((_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ iowrite32((_val), (_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ mmd_val &= ~_mask; \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
+} while (0)
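+
+/* Illustrative encoding (not part of the driver): XMDIO_READ packs the
+ * MMD and register into a clause-45 address, so
+ *
+ *   XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)
+ *
+ * passes MII_ADDR_C45 | (7 << 16) | 0x0030 to read_mmd_regs, assuming
+ * the standard Linux value MDIO_MMD_AN = 7.
+ */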
+
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000000..6bb76d5c817b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,375 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static ssize_t xgbe_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char workarea[32];
+ ssize_t len;
+ unsigned int scan_value;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ workarea[len] = '\0';
+ if (sscanf(workarea, "%x", &scan_value) == 1)
+ *value = scan_value;
+ else
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xgmac_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xgmac_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_addr_read,
+ .write = xgmac_reg_addr_write,
+};
+
+static const struct file_operations xgmac_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_value_read,
+ .write = xgmac_reg_value_write,
+};
+
+static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xpcs_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xpcs_mmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_mmd_read,
+ .write = xpcs_mmd_write,
+};
+
+static const struct file_operations xpcs_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_addr_read,
+ .write = xpcs_reg_addr_write,
+};
+
+static const struct file_operations xpcs_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_value_read,
+ .write = xpcs_reg_value_write,
+};
+
+void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+{
+ struct dentry *pfile;
+ char *buf;
+
+ /* Set defaults */
+ pdata->debugfs_xgmac_reg = 0;
+ pdata->debugfs_xpcs_mmd = 1;
+ pdata->debugfs_xpcs_reg = 0;
+
+	buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+	if (!buf)
+		return;
+
+	pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+ if (pdata->xgbe_debugfs == NULL) {
+ netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("xgmac_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xgmac_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_mmd", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ kfree(buf);
+}
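+
+/* Usage sketch (illustrative; the offset and netdev name are
+ * hypothetical): with debugfs mounted at /sys/kernel/debug, a register
+ * can be inspected from userspace by writing its offset to the address
+ * file and reading the value file:
+ *
+ *   echo 0x0008 > /sys/kernel/debug/amd-xgbe-eth0/xgmac_register
+ *   cat /sys/kernel/debug/amd-xgbe-eth0/xgmac_register_value
+ */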
+
+void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
+{
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000000..6f1c85956d50
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,556 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+
+static void xgbe_free_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_ring_data *rdata;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->rdata) {
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ xgbe_unmap_skb(pdata, rdata);
+ }
+
+ kfree(ring->rdata);
+ ring->rdata = NULL;
+ }
+
+ if (ring->rdesc) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ ring->rdesc_count),
+ ring->rdesc, ring->rdesc_dma);
+ ring->rdesc = NULL;
+ }
+}
+
+static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+
+ DBGPR("<--xgbe_free_ring_resources\n");
+}
+
+static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+{
+ DBGPR("-->xgbe_init_ring\n");
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->rdesc_count = rdesc_count;
+ ring->rdesc = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ rdesc_count), &ring->rdesc_dma,
+ GFP_KERNEL);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+ ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
+ GFP_KERNEL);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
+ ring->rdesc, ring->rdesc_dma, ring->rdata);
+
+ DBGPR("<--xgbe_init_ring\n");
+
+ return 0;
+}
+
+static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+ int ret;
+
+ DBGPR("-->xgbe_alloc_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ DBGPR(" %s - tx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring\n");
+ goto err_ring;
+ }
+
+ DBGPR(" %s - rx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+				     "error initializing Rx ring\n");
+ goto err_ring;
+ }
+ }
+
+ DBGPR("<--xgbe_alloc_ring_resources\n");
+
+ return 0;
+
+err_ring:
+ xgbe_free_ring_resources(pdata);
+
+ return ret;
+}
+
+static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ ring->tx.queue_stopped = 0;
+
+ hw_if->tx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
+}
+
+static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ dma_addr_t rdesc_dma, skb_dma;
+ struct sk_buff *skb = NULL;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ /* Allocate skb & assign to each rdesc */
+ skb = dev_alloc_skb(pdata->rx_buf_size);
+ if (skb == NULL)
+ break;
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pdata->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "failed to do the dma map\n");
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rdata->skb = skb;
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = pdata->rx_buf_size;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ ring->rx.realloc_index = 0;
+ ring->rx.realloc_threshold = 0;
+
+ hw_if->rx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
+}
+
+static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
+{
+ if (rdata->skb_dma) {
+ if (rdata->mapped_as_page) {
+ dma_unmap_page(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ }
+ rdata->skb_dma = 0;
+ rdata->skb_dma_len = 0;
+ }
+
+ if (rdata->skb) {
+ dev_kfree_skb_any(rdata->skb);
+ rdata->skb = NULL;
+ }
+
+ rdata->tso_header = 0;
+ rdata->len = 0;
+ rdata->interrupt = 0;
+ rdata->mapped_as_page = 0;
+}
+
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct skb_frag_struct *frag;
+ dma_addr_t skb_dma;
+ unsigned int start_index, cur_index;
+ unsigned int offset, tso, vlan, datalen, len;
+ unsigned int i;
+
+ DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ packet = &ring->packet_data;
+ packet->rdesc_count = 0;
+ packet->length = 0;
+
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (packet->mss != ring->tx.cur_mss)) ||
+ (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ DBGPR(" TSO packet\n");
+
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ packet->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = packet->header_len;
+ rdata->tso_header = 1;
+
+ offset = packet->header_len;
+
+ packet->length += packet->header_len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ DBGPR(" mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ rdata->mapped_as_page = 1;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry */
+ rdata->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ packet->rdesc_count = cur_index - start_index;
+
+ DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
+
+ return packet->rdesc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ rdata = GET_DESC_DATA(ring, start_index++);
+ xgbe_unmap_skb(pdata, rdata);
+ }
+
+ DBGPR("<--xgbe_map_tx_skb: count=0\n");
+
+ return 0;
+}
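+
+/* Illustrative accounting (not part of the driver; byte counts are
+ * hypothetical): for a TSO skb whose MSS differs from ring->tx.cur_mss,
+ * with a 54-byte header and a linear payload no larger than
+ * TX_MAX_BUF_SIZE, the function consumes one context descriptor slot,
+ * one descriptor for the header and one for the payload, leaving
+ * packet->rdesc_count at 3.
+ */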
+
+static void xgbe_realloc_skb(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct sk_buff *skb = NULL;
+ dma_addr_t skb_dma;
+ int i;
+
+ DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+ ring->rx.realloc_index);
+
+ for (i = 0; i < ring->dirty; i++) {
+ rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+
+ /* Reset rdata values */
+ xgbe_unmap_skb(pdata, rdata);
+
+ /* Allocate skb & assign to each rdesc */
+ skb = dev_alloc_skb(pdata->rx_buf_size);
+ if (skb == NULL) {
+ netdev_alert(pdata->netdev,
+ "failed to allocate skb\n");
+ break;
+ }
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pdata->rx_buf_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "failed to do the dma map\n");
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rdata->skb = skb;
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = pdata->rx_buf_size;
+
+ hw_if->rx_desc_reset(rdata);
+
+ ring->rx.realloc_index++;
+ }
+ ring->dirty = 0;
+
+ DBGPR("<--xgbe_realloc_skb\n");
+}
+
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs_desc\n");
+
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+ desc_if->realloc_skb = xgbe_realloc_skb;
+ desc_if->unmap_skb = xgbe_unmap_skb;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+
+ DBGPR("<--xgbe_init_function_ptrs_desc\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000000..002293b0819d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2182 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+ rate = clk_get_rate(pdata->sysclock);
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
+ */
+ ret = (usec * (rate / 1000000)) / 256;
+
+ DBGPR("<--xgbe_usec_to_riwt\n");
+
+ return ret;
+}
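+
+/* Worked example (illustrative, hypothetical clock): with a 250 MHz
+ * system clock, rate / 1000000 = 250 cycles per usec, so a 100 usec
+ * coalescing time becomes (100 * 250) / 256 = 97 watchdog timer units
+ * after integer division.
+ */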
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+ rate = clk_get_rate(pdata->sysclock);
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ DBGPR("<--xgbe_riwt_to_usec\n");
+
+ return ret;
+}
+
+static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+
+ return 0;
+}
+
+static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
+}
+
+static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
+}
+
+static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ return 0;
+}
+
+static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+}
+
+static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ }
+}
+
+static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+
+ /* Set MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause)
+ xgbe_enable_tx_flow_control(pdata);
+ else
+ xgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ if (pdata->rx_pause)
+ xgbe_enable_rx_flow_control(pdata);
+ else
+ xgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+{
+ xgbe_config_tx_flow_control(pdata);
+ xgbe_config_rx_flow_control(pdata);
+}
+
+static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless polling)
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+			/* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+ }
+}
+
+static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+{
+ /* No MAC interrupts to be enabled */
+ XGMAC_IOWRITE(pdata, MAC_IER, 0);
+
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+}
+
+static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+
+ return 0;
+}
+
+static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+
+ return 0;
+}
+
+static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
+ return 0;
+
+ DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+
+ return 0;
+}
+
+static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
+ return 0;
+
+ DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
+
+ return 0;
+}
+
+static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
+ unsigned int am_mode)
+{
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+ unsigned int i;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
+
+ i = 0;
+ mac_reg = MAC_MACA1HR;
+
+ netdev_for_each_uc_addr(ha, pdata->netdev) {
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding unicast address %pM at 0x%04x\n",
+ ha->addr, mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
+ mac_reg += MAC_MACA_INC;
+
+ i++;
+ }
+
+ if (!am_mode) {
+ netdev_for_each_mc_addr(ha, pdata->netdev) {
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding multicast address %pM at 0x%04x\n",
+ ha->addr, mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
+ mac_reg += MAC_MACA_INC;
+
+ i++;
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ for (; i < pdata->hw_feat.addn_mac; i++) {
+ XGMAC_IOWRITE(pdata, mac_reg, 0);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, 0);
+ mac_reg += MAC_MACA_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
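+
+/* Illustrative layout (not part of the driver; address is
+ * hypothetical): for 02:11:22:33:44:55, addr[0] = 0x02 through
+ * addr[5] = 0x55, giving mac_addr_hi = 0x00005544 and
+ * mac_addr_lo = 0x33221102, the byte order the MAC address registers
+ * expect.
+ */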
+
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
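+
+/* Worked example of the indirect access above (illustrative values,
+ * assuming a clause-45 register 0x10 on MMD 1): after masking off
+ * MII_ADDR_C45, mmd_address = (1 << 16) | 0x10 = 0x10010. The address
+ * phase writes mmd_address >> 8 = 0x100 into the PCS_MMD_SELECT window
+ * and the data phase accesses 32 bits at byte offset
+ * (mmd_address & 0xff) << 2 = 0x40. The same sequence applies to the
+ * write path below.
+ */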
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+}
+
+static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset desc2 control bits (IC, TTSE, B2L & B1L)
+ * Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+}
+
+static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ int i;
+ int start_index = ring->cur;
+
+ DBGPR("-->tx_desc_init\n");
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ /* Initialize Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset desc2 control bits (IC, TTSE, B2L & B1L)
+ * Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC,
+ * etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+ }
+
+ /* Make sure everything is written to the descriptor(s) before
+ * telling the device about them
+ */
+ wmb();
+
+ /* Update the total number of Tx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--tx_desc_init\n");
+}
+
+static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to dma address (lo)
+ * Set buffer 1 (hi) address to dma address (hi)
+ * Set buffer 2 (lo) address to zero
+ * Set buffer 2 (hi) address to zero and set control bits
+ * OWN and INTE
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc2 = 0;
+
+ rdesc->desc3 = 0;
+ if (rdata->interrupt)
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+}
+
+static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ unsigned int start_index = ring->cur;
+ unsigned int rx_coalesce, rx_frames;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+ rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
+ rx_frames = pdata->rx_frames;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ /* Initialize Rx descriptor
+ * Set buffer 1 (lo) address to dma address (lo)
+ * Set buffer 1 (hi) address to dma address (hi)
+ * Set buffer 2 (lo) address to zero
+ * Set buffer 2 (hi) address to zero and set control
+ * bits OWN and INTE appropriately
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ rdata->interrupt = 1;
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
+ /* Clear interrupt on completion bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ 0);
+ rdata->interrupt = 0;
+ }
+ }
+
+ /* Make sure everything is written to the descriptors before
+ * telling the device about them
+ */
+ wmb();
+
+ /* Update the total number of Rx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--rx_desc_init\n");
+}
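+
+/* Interrupt moderation in rx_desc_init above: when Rx coalescing is
+ * active, INTE is left set only on every rx_frames-th descriptor (or on
+ * none of them when moderation is purely timer based via rx_riwt), so
+ * with rx_frames = 16, for example, at most one Rx interrupt is raised
+ * per 16 completed descriptors.
+ */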
+
+static void xgbe_pre_xmit(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int csum, tso, vlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_coalesce, tx_frames;
+ int start_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_pre_xmit\n");
+
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
+ tx_frames = pdata->tx_frames;
+ if (tx_coalesce && !channel->tx_timer_active)
+ ring->coalesce_count = 0;
+
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if this is a TSO or VLAN packet */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ DBGPR(" TSO context descriptor, mss=%u\n",
+ packet->mss);
+
+ /* Set the MSS size */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, packet->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ TCMSSV, 1);
+
+ ring->tx.cur_mss = packet->mss;
+ }
+
+ if (vlan_context) {
+ DBGPR(" VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Set the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, packet->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Set IC bit based on Tx coalescing settings */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+ if (tx_coalesce && (!tx_frames ||
+ (++ring->coalesce_count % tx_frames)))
+ /* Clear IC bit */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (ring->cur != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ packet->tcp_payload_len);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
+ }
+
+ for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* Set IC bit based on Tx coalescing settings */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+ if (tx_coalesce && (!tx_frames ||
+ (++ring->coalesce_count % tx_frames)))
+ /* Clear IC bit */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = GET_DESC_DATA(ring, start_index);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
+#endif
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor */
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (tx_coalesce && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ DBGPR(" %s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->rdesc_count - 1),
+ (ring->cur - 1) & (ring->rdesc_count - 1));
+
+ DBGPR("<--xgbe_pre_xmit\n");
+}
+
+static int xgbe_dev_read(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int err, etlt;
+
+ DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Check for data availability */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ return 1;
+
+#ifdef XGMAC_ENABLE_RX_DESC_DUMP
+ xgbe_dump_rx_desc(ring, rdesc, ring->cur);
+#endif
+
+ /* Get the packet length */
+ rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
+ /* Not all the data has been transferred for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+ etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || !etlt) {
+ if (etlt == 0x09) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0,
+ OVT);
+ DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ }
+ } else {
+ if ((etlt == 0x05) || (etlt == 0x06))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ else
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+ }
+
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+}
+
+static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+}
+
+static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
+ enum xgbe_int_state int_state)
+{
+ unsigned int dma_ch_ier;
+
+ if (int_state == XGMAC_INT_STATE_SAVE) {
+ channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ channel->saved_ier &= DMA_INTERRUPT_MASK;
+ } else {
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ dma_ch_ier |= channel->saved_ier;
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ switch (int_id) {
+ case XGMAC_INT_DMA_ISR_DC0IS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_ISR_DC0IS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ dma_ch_ier &= ~DMA_INTERRUPT_MASK;
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ unsigned int count = 2000;
+
+ DBGPR("-->xgbe_exit\n");
+
+ /* Issue a software reset */
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ usleep_range(10, 15);
+
+ /* Poll until the software reset completes or the count expires */
+ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ DBGPR("<--xgbe_exit\n");
+
+ return 0;
+}
+
+static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, count;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+ /* Poll until the flush completes on each queue or the count expires */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+ count = 2000;
+ while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+{
+ /* Set enhanced addressing mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+}
+
+static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ unsigned int arcache, awcache;
+
+ arcache = 0;
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+}
+
+static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to strict priority algorithm */
+ for (i = 0; i < XGBE_TC_CNT; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
+
+ /* Set Rx to strict priority algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
+ unsigned char queue_count)
+{
+ unsigned int q_fifo_size = 0;
+ enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ /* Calculate Tx/Rx fifo share per queue */
+ switch (fifo_size) {
+ case 0:
+ q_fifo_size = FIFO_SIZE_B(128);
+ break;
+ case 1:
+ q_fifo_size = FIFO_SIZE_B(256);
+ break;
+ case 2:
+ q_fifo_size = FIFO_SIZE_B(512);
+ break;
+ case 3:
+ q_fifo_size = FIFO_SIZE_KB(1);
+ break;
+ case 4:
+ q_fifo_size = FIFO_SIZE_KB(2);
+ break;
+ case 5:
+ q_fifo_size = FIFO_SIZE_KB(4);
+ break;
+ case 6:
+ q_fifo_size = FIFO_SIZE_KB(8);
+ break;
+ case 7:
+ q_fifo_size = FIFO_SIZE_KB(16);
+ break;
+ case 8:
+ q_fifo_size = FIFO_SIZE_KB(32);
+ break;
+ case 9:
+ q_fifo_size = FIFO_SIZE_KB(64);
+ break;
+ case 10:
+ q_fifo_size = FIFO_SIZE_KB(128);
+ break;
+ case 11:
+ q_fifo_size = FIFO_SIZE_KB(256);
+ break;
+ }
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Set the queue fifo size programmable value */
+ if (q_fifo_size >= FIFO_SIZE_KB(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(128))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(64))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(32))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(16))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(8))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(4))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(2))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(1))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
+ else if (q_fifo_size >= FIFO_SIZE_B(512))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_512;
+ else if (q_fifo_size >= FIFO_SIZE_B(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ return p_fifo;
+}
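+
+/* Example (assuming the FIFO_SIZE_KB/FIFO_SIZE_B macros expand to byte
+ * counts): a hardware-reported fifo_size of 7 decodes to 16KB; split
+ * across four queues that is 4KB per queue, which maps to the
+ * programmable value XGMAC_MTL_FIFO_SIZE_4K.
+ */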
+
+static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+ pdata->hw_feat.tx_q_cnt);
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+ pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+ pdata->hw_feat.rx_q_cnt);
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+ pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, reg, reg_val;
+ unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+}
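+
+/* Each MTL_RQDCM register packs MTL_RQDCM_Q_PER_REG (four) 8-bit queue
+ * fields, and 0x80 sets the dynamic-mapping bit within one field. With
+ * six Rx queues, for instance, the loop above writes 0x80808080 to
+ * MTL_RQDCM0R and 0x00008080 to the next register.
+ */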
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+ /* Activate flow control when less than 4k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+
+ /* De-activate flow control when more than 6k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ }
+}
+
+static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+}
+
+static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xgbe_enable_rx_csum(pdata);
+ else
+ xgbe_disable_rx_csum(pdata);
+}
+
+static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xgbe_enable_rx_vlan_stripping(pdata);
+ else
+ xgbe_disable_rx_vlan_stripping(pdata);
+}
+
+static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
+ stats->txoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
+ stats->txframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
+ stats->txmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
+ stats->tx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
+ stats->tx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
+ stats->tx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
+ stats->tx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
+ stats->tx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
+ stats->tx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
+ stats->txunicastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
+ stats->txmulticastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
+ stats->txunderflowerror +=
+ XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
+ stats->txoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
+ stats->txframecount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
+ stats->txpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
+ stats->txvlanframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
+ stats->rxframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
+ stats->rxoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
+ stats->rxoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
+ stats->rxbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
+ stats->rxmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
+ stats->rxcrcerror +=
+ XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
+ stats->rxrunterror +=
+ XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
+ stats->rxjabbererror +=
+ XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
+ stats->rxundersize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
+ stats->rxoversize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
+ stats->rx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
+ stats->rx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
+ stats->rx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
+ stats->rx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
+ stats->rx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
+ stats->rx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
+ stats->rxunicastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
+ stats->rxlengtherror +=
+ XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
+ stats->rxoutofrangetype +=
+ XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
+ stats->rxpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
+ stats->rxfifooverflow +=
+ XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
+ stats->rxvlanframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
+ stats->rxwatchdogerror +=
+ XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+ /* Freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
+
+ stats->txoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
+}
+
+static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+{
+ /* Set counters to reset on read */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
+
+ /* Reset the counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+}
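+
+/* With reset-on-read (ROR) set here, each MMC counter register read in
+ * xgbe_read_mmc_stats() and in the MMC interrupt handlers returns only
+ * the delta since the previous read, which is why those paths
+ * accumulate with "+=" rather than assigning.
+ */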
+
+static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
+ reg_val = 0;
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ reg_val |= (0x02 << (i << 1));
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
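+
+/* MAC_RQC0R carries a 2-bit enable field per Rx queue; the value 0x2
+ * marks a queue as enabled. With four Rx queues, for example, the loop
+ * above produces reg_val = 0xaa.
+ */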
+
+static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Disable each Rx queue */
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+}
+
+static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static int xgbe_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_init\n");
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialize DMA related features
+ */
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+ xgbe_config_pblx8(pdata);
+ xgbe_config_tx_pbl_val(pdata);
+ xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+
+ /*
+ * Initialize MTL related features
+ */
+ xgbe_config_mtl_mode(pdata);
+ xgbe_config_rx_queue_mapping(pdata);
+ /* TODO: Program the priorities mapped to the Selected Traffic
+ * Classes in MTL_TC_Prty_Map0-3 registers */
+ xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control_threshold(pdata);
+ /* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
+ /* TODO: Error Packet and undersized good Packet forwarding enable
+ * (FEP and FUP)
+ */
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /* Transmit Class Weight */
+ XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
+
+ /*
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs\n");
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+ hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
+ hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
+ hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+ hw_if->set_mac_address = xgbe_set_mac_address;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+
+ hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+ hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+ hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+ hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+ hw_if->enable_rx = xgbe_enable_rx;
+ hw_if->disable_rx = xgbe_disable_rx;
+
+ hw_if->powerup_tx = xgbe_powerup_tx;
+ hw_if->powerdown_tx = xgbe_powerdown_tx;
+ hw_if->powerup_rx = xgbe_powerup_rx;
+ hw_if->powerdown_rx = xgbe_powerdown_rx;
+
+ hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_read = xgbe_dev_read;
+ hw_if->enable_int = xgbe_enable_int;
+ hw_if->disable_int = xgbe_disable_int;
+ hw_if->init = xgbe_init;
+ hw_if->exit = xgbe_exit;
+
+ /* Descriptor related sequences have to be initialized here */
+ hw_if->tx_desc_init = xgbe_tx_desc_init;
+ hw_if->rx_desc_init = xgbe_rx_desc_init;
+ hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+ hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+ hw_if->is_last_desc = xgbe_is_last_desc;
+ hw_if->is_context_desc = xgbe_is_context_desc;
+
+ /* For flow control */
+ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+
+ /* For RX coalescing */
+ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+ hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+ hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+ hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+ hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+ hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+ hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+ hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+ hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+ hw_if->config_pblx8 = xgbe_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+ hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+
+ DBGPR("<--xgbe_init_function_ptrs\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000000..cfe3d93b5f52
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,1351 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <net/busy_poll.h>
+#include <linux/clk.h>
+#include <linux/if_ether.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_poll(struct napi_struct *, int);
+static void xgbe_set_rx_mode(struct net_device *);
+
+static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+{
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+}
+
+static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (rx_buf_size < RX_MIN_BUF_SIZE)
+ rx_buf_size = RX_MIN_BUF_SIZE;
+ rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
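+
+/* Example (assuming RX_BUF_ALIGN is 64): an MTU of 1500 gives
+ * 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4) = 1522 bytes,
+ * which rounds up to a 1536 byte Rx buffer.
+ */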
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring)
+ hw_if->enable_int(channel,
+ XGMAC_INT_DMA_CH_SR_TI);
+ if (channel->rx_ring)
+ hw_if->enable_int(channel,
+ XGMAC_INT_DMA_CH_SR_RI);
+ }
+}
+
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring)
+ hw_if->disable_int(channel,
+ XGMAC_INT_DMA_CH_SR_TI);
+ if (channel->rx_ring)
+ hw_if->disable_int(channel,
+ XGMAC_INT_DMA_CH_SR_RI);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+ unsigned int mac_isr;
+ unsigned int i;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts, so for polling mode we just need to check this
+ * register for a non-zero value
+ */
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ if (!dma_isr)
+ goto isr_done;
+
+ DBGPR("-->xgbe_isr\n");
+
+ DBGPR(" DMA_ISR = %08x\n", dma_isr);
+ DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
+ DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel + i;
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+ }
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+ /* Clear all interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+ }
+
+ DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
+
+ DBGPR("<--xgbe_isr\n");
+
+isr_done:
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+{
+ struct xgbe_channel *channel = container_of(timer,
+ struct xgbe_channel,
+ tx_timer);
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+
+ channel->tx_timer_active = 0;
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ DBGPR("<--xgbe_tx_timer\n");
+
+ return HRTIMER_NORESTART;
+}
+
+static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_init_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s adding tx timer\n", channel->name);
+ hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ channel->tx_timer.function = xgbe_tx_timer;
+ }
+
+ DBGPR("<--xgbe_init_tx_timers\n");
+}
+
+static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s deleting tx timer\n", channel->name);
+ channel->tx_timer_active = 0;
+ hrtimer_cancel(&channel->tx_timer);
+ }
+
+ DBGPR("<--xgbe_stop_tx_timers\n");
+}
+
+void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ DBGPR("-->xgbe_get_all_hw_features\n");
+
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+
+ /* The Queue and Channel counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+
+ DBGPR("<--xgbe_get_all_hw_features\n");
+}
+
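+/* NAPI is registered with the stack only on the first enable (add set);
+ * later power-up paths just re-enable the existing instance
+ */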
+static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+{
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&pdata->napi);
+}
+
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+{
+ napi_disable(&pdata->napi);
+}
+
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_tx_coalesce\n");
+
+ pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
+
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_tx_coalesce\n");
+}
+
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_rx_coalesce\n");
+}
+
+static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_skbuff\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+ desc_if->unmap_skb(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_tx_skbuff\n");
+}
+
+static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_skbuff\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+ desc_if->unmap_skb(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_rx_skbuff\n");
+}
+
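+/* Power management entry points; "caller" identifies whether the request
+ * originated from within the driver (suspend/resume) or via ioctl
+ */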
+int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered down\n");
+ DBGPR("<--xgbe_powerdown\n");
+ return -EINVAL;
+ }
+
+ phy_stop(pdata->phydev);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata);
+
+ /* Powerdown Tx/Rx */
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+}
+
+int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered up\n");
+ DBGPR("<--xgbe_powerup\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ pdata->power_down = 0;
+
+ phy_start(pdata->phydev);
+
+ /* Enable Tx/Rx */
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+ xgbe_napi_enable(pdata, 0);
+ netif_tx_start_all_queues(netdev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+}
+
+static int xgbe_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_start\n");
+
+ xgbe_set_rx_mode(netdev);
+
+ hw_if->init(pdata);
+
+ phy_start(pdata->phydev);
+
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ xgbe_init_tx_timers(pdata);
+
+ xgbe_napi_enable(pdata, 1);
+ netif_tx_start_all_queues(netdev);
+
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
+}
+
+static void xgbe_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_stop\n");
+
+ phy_stop(pdata->phydev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata);
+
+ xgbe_stop_tx_timers(pdata);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+ DBGPR("<--xgbe_stop\n");
+}
+
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_restart_dev\n");
+
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
+ synchronize_irq(pdata->irq_number);
+
+ xgbe_free_tx_skbuff(pdata);
+ xgbe_free_rx_skbuff(pdata);
+
+ /* Issue software reset to device if requested */
+ if (reset)
+ hw_if->exit(pdata);
+
+ xgbe_start(pdata);
+
+ DBGPR("<--xgbe_restart_dev\n");
+}
+
+static void xgbe_restart(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ restart_work);
+
+ rtnl_lock();
+
+ xgbe_restart_dev(pdata, 1);
+
+ rtnl_unlock();
+}
+
+static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ if (vlan_tx_tag_present(skb))
+ packet->vlan_ctag = vlan_tx_tag_get(skb);
+}
+
+static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ int ret;
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ packet->tcp_header_len = tcp_hdrlen(skb);
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+ DBGPR(" packet->mss=%u\n", packet->mss);
+
+ return 0;
+}
+
+static int xgbe_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ DBGPR(" TSO packet to be processed\n");
+
+ return 1;
+}
+
+static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ context_desc = 0;
+ packet->rdesc_count = 0;
+
+ if (xgbe_is_tso(skb)) {
+		/* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+		/* TSO requires an extra descriptor for TSO header */
+ packet->rdesc_count++;
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE, 1);
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+
+ if (vlan_tx_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ }
+
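+	/* One descriptor is needed per TX_MAX_BUF_SIZE chunk of the
+	 * linear data and of each fragment
+	 */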
+ for (len = skb_headlen(skb); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xgbe_open(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_open\n");
+
+ /* Enable the clock */
+ ret = clk_prepare_enable(pdata->sysclock);
+ if (ret) {
+ netdev_alert(netdev, "clk_prepare_enable failed\n");
+ return ret;
+ }
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ goto err_clk;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_clk;
+
+ /* Initialize the device restart work struct */
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+
+ /* Request interrupts */
+ ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+		netdev_alert(netdev, "error requesting irq %d\n",
+			     netdev->irq);
+ goto err_irq;
+ }
+ pdata->irq_number = netdev->irq;
+
+ ret = xgbe_start(pdata);
+ if (ret)
+ goto err_start;
+
+ DBGPR("<--xgbe_open\n");
+
+ return 0;
+
+err_start:
+ hw_if->exit(pdata);
+
+ devm_free_irq(pdata->dev, pdata->irq_number, pdata);
+ pdata->irq_number = 0;
+
+err_irq:
+ desc_if->free_ring_resources(pdata);
+
+err_clk:
+ clk_disable_unprepare(pdata->sysclock);
+
+ return ret;
+}
+
+static int xgbe_close(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+ DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Free all the ring data */
+ desc_if->free_ring_resources(pdata);
+
+ /* Release the interrupt */
+ if (pdata->irq_number != 0) {
+ devm_free_irq(pdata->dev, pdata->irq_number, pdata);
+ pdata->irq_number = 0;
+ }
+
+ /* Disable the clock */
+ clk_disable_unprepare(pdata->sysclock);
+
+ DBGPR("<--xgbe_close\n");
+
+ return 0;
+}
+
+static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ unsigned long flags;
+ int ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+ channel = pdata->channel + skb->queue_mapping;
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+
+ ret = NETDEV_TX_OK;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (skb->len == 0) {
+ netdev_err(netdev, "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Calculate preliminary packet info */
+ memset(packet, 0, sizeof(*packet));
+ xgbe_packet_info(ring, skb, packet);
+
+ /* Check that there are enough descriptors available */
+ if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+ ret = NETDEV_TX_BUSY;
+ goto tx_netdev_return;
+ }
+
+ ret = xgbe_prep_tso(skb, packet);
+ if (ret) {
+ netdev_err(netdev, "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+ xgbe_prep_vlan(skb, packet);
+
+ if (!desc_if->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Configure required descriptor fields for transmission */
+ hw_if->pre_xmit(channel);
+
+#ifdef XGMAC_ENABLE_TX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, true);
+#endif
+
+tx_netdev_return:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ DBGPR("<--xgbe_xmit\n");
+
+ return ret;
+}
+
+static void xgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int pr_mode, am_mode;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
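+	/* Fall back to promiscuous/all-multicast mode if the additional
+	 * MAC address filters cannot hold all requested addresses
+	 */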
+ if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
+ pr_mode = 1;
+ if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
+ am_mode = 1;
+ if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
+ pdata->hw_feat.addn_mac)
+ pr_mode = 1;
+
+ hw_if->set_promiscuous_mode(pdata, pr_mode);
+ hw_if->set_all_multicast_mode(pdata, am_mode);
+ if (!pr_mode)
+ hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+}
+
+static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct sockaddr *saddr = addr;
+
+ DBGPR("-->xgbe_set_mac_address\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_if->set_mac_address(pdata, netdev->dev_addr);
+
+ DBGPR("<--xgbe_set_mac_address\n");
+
+ return 0;
+}
+
+static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_change_mtu\n");
+
+ ret = xgbe_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xgbe_restart_dev(pdata, 0);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+
+ return s;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xgbe_poll_controller(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_poll_controller\n");
+
+ disable_irq(pdata->irq_number);
+
+ xgbe_isr(pdata->irq_number, pdata);
+
+ enable_irq(pdata->irq_number);
+
+ DBGPR("<--xgbe_poll_controller\n");
+}
+#endif /* End CONFIG_NET_POLL_CONTROLLER */
+
+static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rxcsum_enabled, rxvlan_enabled;
+
+ rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
+ rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+ hw_if->enable_rx_csum(pdata);
+ netdev_alert(netdev, "state change - rxcsum enabled\n");
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+ hw_if->disable_rx_csum(pdata);
+ netdev_alert(netdev, "state change - rxcsum disabled\n");
+ }
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+ hw_if->enable_rx_vlan_stripping(pdata);
+ netdev_alert(netdev, "state change - rxvlan enabled\n");
+ } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+ hw_if->disable_rx_vlan_stripping(pdata);
+ netdev_alert(netdev, "state change - rxvlan disabled\n");
+ }
+
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+
+ return 0;
+}
+
+static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+ .ndo_start_xmit = xgbe_xmit,
+ .ndo_set_rx_mode = xgbe_set_rx_mode,
+ .ndo_set_mac_address = xgbe_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = xgbe_change_mtu,
+ .ndo_get_stats64 = xgbe_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgbe_poll_controller,
+#endif
+ .ndo_set_features = xgbe_set_features,
+};
+
+struct net_device_ops *xgbe_get_netdev_ops(void)
+{
+ return (struct net_device_ops *)&xgbe_netdev_ops;
+}
+
+static int xgbe_tx_poll(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ unsigned long flags;
+ int processed = 0;
+
+ DBGPR("-->xgbe_tx_poll\n");
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
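+	/* Reclaim completed descriptors between dirty and cur, bounded
+	 * by TX_DESC_MAX_PROC per call
+	 */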
+ while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
+ rdata = GET_DESC_DATA(ring, ring->dirty);
+ rdesc = rdata->rdesc;
+
+ if (!hw_if->tx_complete(rdesc))
+ break;
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
+#endif
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_if->unmap_skb(pdata, rdata);
+ hw_if->tx_desc_reset(rdata);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_wake_subqueue(netdev, channel->queue_index);
+ }
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return processed;
+}
+
+static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ unsigned int incomplete, error;
+ unsigned int cur_len, put_len, max_len;
+ int received = 0;
+
+ DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ packet = &ring->packet_data;
+ while (received < budget) {
+ DBGPR(" cur = %d\n", ring->cur);
+
+ /* Clear the packet data information */
+ memset(packet, 0, sizeof(*packet));
+ skb = NULL;
+ error = 0;
+ cur_len = 0;
+
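+		/* A packet may span multiple descriptors; loop back to
+		 * read_again while the INCOMPLETE attribute is set
+		 */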
+read_again:
+ rdata = GET_DESC_DATA(ring, ring->cur);
+
+ if (hw_if->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+ ring->dirty++;
+
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_FROM_DEVICE);
+ rdata->skb_dma = 0;
+
+ incomplete = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ INCOMPLETE);
+
+ /* Earlier error, just drain the remaining data */
+ if (incomplete && error)
+ goto read_again;
+
+ if (error || packet->errors) {
+ if (packet->errors)
+ DBGPR("Error in received packet\n");
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ put_len = rdata->len - cur_len;
+ if (skb) {
+ if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
+ DBGPR("pskb_expand_head error\n");
+ if (incomplete) {
+ error = 1;
+ goto read_again;
+ }
+
+ dev_kfree_skb(skb);
+ continue;
+ }
+ memcpy(skb_tail_pointer(skb), rdata->skb->data,
+ put_len);
+ } else {
+ skb = rdata->skb;
+ rdata->skb = NULL;
+ }
+ skb_put(skb, put_len);
+ cur_len += put_len;
+
+ if (incomplete)
+ goto read_again;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ DBGPR("packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+#ifdef XGMAC_ENABLE_RX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, false);
+#endif
+
+ skb_checksum_none_assert(skb);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CSUM_DONE))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+ skb_mark_napi_id(skb, &pdata->napi);
+
+ netdev->last_rx = jiffies;
+ napi_gro_receive(&pdata->napi, skb);
+ }
+
+ if (received) {
+ desc_if->realloc_skb(channel);
+
+ /* Update the Rx Tail Pointer Register with address of
+ * the last cleaned entry */
+ rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+ }
+
+ DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+
+ return received;
+}
+
+static int xgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
+ napi);
+ struct xgbe_channel *channel;
+ int processed;
+ unsigned int i;
+
+ DBGPR("-->xgbe_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = 0;
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ processed += xgbe_rx_poll(channel, budget - processed);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+
+ DBGPR("<--xgbe_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
+ unsigned int count, unsigned int flag)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ while (count--) {
+ rdata = GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ idx++;
+ }
+}
+
+void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+ unsigned int idx)
+{
+ DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+}
+
+void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_alert(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+
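+	/* Hex-dump the payload 32 bytes per line, grouped into 4-byte
+	 * words with an extra gap every 16 bytes
+	 */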
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000000..8909f2b51af1
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,510 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+struct xgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_size;
+ int stat_offset;
+};
+
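+/* Describe one MMC counter for ethtool: display name plus the size and
+ * offset of the corresponding field within struct xgbe_prv_data
+ */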
+#define XGMAC_MMC_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
+ offsetof(struct xgbe_prv_data, mmc_stats._var), \
+ }
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+ XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+ XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+ XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+ XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+ XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+ XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+ XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+ XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+ XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+};
+#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ memcpy(data, xgbe_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+{
+ int ret;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XGBE_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ DBGPR("<--%s\n", __func__);
+
+ return ret;
+}
+
+static void xgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_get_pauseparam\n");
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->tx_pause = pdata->tx_pause;
+ pause->rx_pause = pdata->rx_pause;
+
+ DBGPR("<--xgbe_get_pauseparam\n");
+}
+
+static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_set_pauseparam\n");
+
+ DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
+ pause->autoneg, pause->tx_pause, pause->rx_pause);
+
+ pdata->pause_autoneg = pause->autoneg;
+ if (pause->autoneg) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+
+ } else {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ pdata->tx_pause = pause->tx_pause;
+ pdata->rx_pause = pause->rx_pause;
+ }
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_pauseparam\n");
+
+ return ret;
+}
+
+static int xgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_get_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ spin_lock_irq(&pdata->lock);
+
+ ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->transceiver = XCVR_EXTERNAL;
+
+ spin_unlock_irq(&pdata->lock);
+
+ DBGPR("<--xgbe_get_settings\n");
+
+ return ret;
+}
+
+static int xgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ u32 speed;
+ int ret;
+
+ DBGPR("-->xgbe_set_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ spin_lock_irq(&pdata->lock);
+
+ speed = ethtool_cmd_speed(cmd);
+
+ ret = -EINVAL;
+ if (cmd->phy_address != phydev->addr)
+ goto unlock;
+
+ if ((cmd->autoneg != AUTONEG_ENABLE) &&
+ (cmd->autoneg != AUTONEG_DISABLE))
+ goto unlock;
+
+ if ((cmd->autoneg == AUTONEG_DISABLE) &&
+ (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
+ (cmd->duplex != DUPLEX_FULL)))
+ goto unlock;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ /* Clear settings needed to force speeds */
+ phydev->supported &= ~SUPPORTED_1000baseT_Full;
+ phydev->supported &= ~SUPPORTED_10000baseT_Full;
+ } else {
+ /* Add settings needed to force speed */
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+ phydev->supported |= SUPPORTED_10000baseT_Full;
+ }
+
+ cmd->advertising &= phydev->supported;
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ goto unlock;
+
+ ret = 0;
+ phydev->autoneg = cmd->autoneg;
+ phydev->speed = speed;
+ phydev->duplex = cmd->duplex;
+ phydev->advertising = cmd->advertising;
+
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+unlock:
+ spin_unlock_irq(&pdata->lock);
+
+ DBGPR("<--xgbe_set_settings\n");
+
+ return ret;
+}
+
+static void xgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+ drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int riwt;
+
+ DBGPR("-->xgbe_get_coalesce\n");
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ riwt = pdata->rx_riwt;
+ ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ DBGPR("<--xgbe_get_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames, tx_usecs;
+
+ DBGPR("-->xgbe_set_coalesce\n");
+
+ /* Check for not supported parameters */
+ if ((ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs_irq) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->use_adaptive_rx_coalesce) ||
+ (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) ||
+ (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) ||
+ (ec->tx_coalesce_usecs_low) ||
+ (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) ||
+ (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ /* Can only change rx-frames when interface is down (see
+ * rx_descriptor_init in xgbe-dev.c)
+ */
+ rx_frames = pdata->rx_frames;
+ if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
+ netdev_alert(netdev,
+ "interface must be down to change rx-frames\n");
+ return -EINVAL;
+ }
+
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+
+ /* Use smallest possible value if conversion resulted in zero */
+ if (ec->rx_coalesce_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+ rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
+ netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
+ rx_usecs);
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
+ netdev_alert(netdev, "rx-frames is limited to %d frames\n",
+ pdata->channel->rx_ring->rdesc_count);
+ return -EINVAL;
+ }
+
+ tx_usecs = ec->tx_coalesce_usecs;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+ if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
+ netdev_alert(netdev, "tx-frames is limited to %d frames\n",
+ pdata->channel->tx_ring->rdesc_count);
+ return -EINVAL;
+ }
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+ pdata->tx_usecs = tx_usecs;
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_set_coalesce\n");
+
+ return 0;
+}
+
+static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_settings = xgbe_get_settings,
+ .set_settings = xgbe_set_settings,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = xgbe_get_coalesce,
+ .set_coalesce = xgbe_set_coalesce,
+ .get_pauseparam = xgbe_get_pauseparam,
+ .set_pauseparam = xgbe_set_pauseparam,
+ .get_strings = xgbe_get_strings,
+ .get_ethtool_stats = xgbe_get_ethtool_stats,
+ .get_sset_count = xgbe_get_sset_count,
+};
+
+struct ethtool_ops *xgbe_get_ethtool_ops(void)
+{
+ return (struct ethtool_ops *)&xgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000000..c83584a26713
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,512 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(XGBE_DRV_VERSION);
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
+static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+
+ DBGPR("-->xgbe_alloc_rings\n");
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = devm_kcalloc(pdata->dev, count,
+ sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ return NULL;
+
+ tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
+ sizeof(struct xgbe_ring), GFP_KERNEL);
+ if (!tx_ring)
+ return NULL;
+
+ rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
+ sizeof(struct xgbe_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return NULL;
+
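+	/* Assign a Tx ring to the first tx_ring_count channels and an Rx
+	 * ring to the first rx_ring_count channels; a channel may end up
+	 * with a Tx ring, an Rx ring, or both
+	 */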
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+			spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_count = count;
+
+ DBGPR("<--xgbe_alloc_rings\n");
+
+ return channel_mem;
+}
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_default_config\n");
+
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_16;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_16;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->power_down = 0;
+ pdata->default_autoneg = AUTONEG_ENABLE;
+ pdata->default_speed = SPEED_10000;
+
+ DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+}
+
+static int xgbe_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct xgbe_hw_if *hw_if;
+ struct xgbe_desc_if *desc_if;
+ struct net_device *netdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const u8 *mac_addr;
+ int ret;
+
+ DBGPR("--> xgbe_probe\n");
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->pdev = pdev;
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, netdev);
+
+ spin_lock_init(&pdata->lock);
+ mutex_init(&pdata->xpcs_mutex);
+
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
+ pdata->tx_desc_count = TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
+ pdata->rx_desc_count = RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Obtain the system clock setting */
+ pdata->sysclock = devm_clk_get(dev, NULL);
+ if (IS_ERR(pdata->sysclock)) {
+ dev_err(dev, "devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->sysclock);
+ goto err_io;
+ }
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Set the DMA mask */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ *(dev->dma_mask) = DMA_BIT_MASK(40);
+ dev->coherent_dma_mask = DMA_BIT_MASK(40);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ goto err_io;
+ }
+ netdev->irq = ret;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+ hw_if = &pdata->hw_if;
+ desc_if = &pdata->desc_if;
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+ /* Retrieve the MAC address */
+ mac_addr = of_get_mac_address(dev->of_node);
+ if (!mac_addr) {
+ dev_err(dev, "invalid mac address for this device\n");
+ ret = -EINVAL;
+ goto err_io;
+ }
+ memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ pdata->phy_mode = of_get_phy_mode(dev->of_node);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+ dev_err(dev, "invalid phy-mode specified for this device\n");
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+ /* Calculate the number of Tx and Rx rings to be created */
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+	if (ret) {
+		dev_err(dev, "error setting real tx queue count\n");
+		goto err_io;
+	}
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real rx queue count\n");
+ goto err_io;
+ }
+
+ /* Allocate the rings for the DMA channels */
+ pdata->channel = xgbe_alloc_rings(pdata);
+ if (!pdata->channel) {
+ dev_err(dev, "ring allocation failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+	/* Prepare to register with MDIO */
+ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
+ if (!pdata->mii_bus_id) {
+ dev_err(dev, "failed to allocate mii bus id\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+ ret = xgbe_mdio_register(pdata);
+ if (ret)
+ goto err_bus_id;
+
+ /* Set network and ethtool operations */
+ netdev->netdev_ops = xgbe_get_netdev_ops();
+ netdev->ethtool_ops = xgbe_get_ethtool_ops();
+
+ /* Set device features */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ xgbe_init_rx_coalesce(pdata);
+ xgbe_init_tx_coalesce(pdata);
+
+ netif_carrier_off(netdev);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_reg_netdev;
+ }
+
+ xgbe_debugfs_init(pdata);
+
+ netdev_notice(netdev, "net device enabled\n");
+
+ DBGPR("<-- xgbe_probe\n");
+
+ return 0;
+
+err_reg_netdev:
+ xgbe_mdio_unregister(pdata);
+
+err_bus_id:
+ kfree(pdata->mii_bus_id);
+
+err_io:
+ free_netdev(netdev);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_remove\n");
+
+ xgbe_debugfs_exit(pdata);
+
+ unregister_netdev(netdev);
+
+ xgbe_mdio_unregister(pdata);
+
+ kfree(pdata->mii_bus_id);
+
+ free_netdev(netdev);
+
+ DBGPR("<--xgbe_remove\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int xgbe_suspend(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (!netif_running(netdev)) {
+ DBGPR("<--xgbe_suspend\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_resume\n");
+
+ if (!netif_running(netdev)) {
+ DBGPR("<--xgbe_resume\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_of_match);
+static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
+
+static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = "amd-xgbe",
+ .of_match_table = xgbe_of_match,
+ .pm = &xgbe_pm_ops,
+ },
+ .probe = xgbe_probe,
+ .remove = xgbe_remove,
+};
+
+module_platform_driver(xgbe_driver);
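module_platform_driver() removes the usual init/exit boilerplate; via the generic module_driver() helper it expands to roughly the following registration pair:

	static int __init xgbe_driver_init(void)
	{
		return platform_driver_register(&xgbe_driver);
	}
	module_init(xgbe_driver_init);

	static void __exit xgbe_driver_exit(void)
	{
		platform_driver_unregister(&xgbe_driver);
	}
	module_exit(xgbe_driver_exit);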
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000000..ea7a5d6750ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,433 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data;
+
+ DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
+ prtad, mmd_reg);
+
+ mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+
+ DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+
+ return mmd_data;
+}
+
+static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
+ u16 mmd_val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data = mmd_val;
+
+ DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
+ prtad, mmd_reg, mmd_data);
+
+ hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+
+ DBGPR_MDIO("<--xgbe_mdio_write\n");
+
+ return 0;
+}
+
+static void xgbe_adjust_link(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct phy_device *phydev = pdata->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ if (!phydev)
+ return;
+
+ DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
+ phydev->addr, phydev->link, pdata->phy_link);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (phydev->link) {
+ /* Flow control support */
+ if (pdata->pause_autoneg) {
+ if (phydev->pause || phydev->asym_pause) {
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ } else {
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ }
+ }
+
+ if (pdata->tx_pause != pdata->phy_tx_pause) {
+ hw_if->config_tx_flow_control(pdata);
+ pdata->phy_tx_pause = pdata->tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy_rx_pause) {
+ hw_if->config_rx_flow_control(pdata);
+ pdata->phy_rx_pause = pdata->rx_pause;
+ }
+
+ /* Speed support */
+ if (phydev->speed != pdata->phy_speed) {
+ new_state = 1;
+
+ switch (phydev->speed) {
+ case SPEED_10000:
+ hw_if->set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ hw_if->set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ hw_if->set_gmii_speed(pdata);
+ break;
+ }
+ pdata->phy_speed = phydev->speed;
+ }
+
+ if (phydev->link != pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 1;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR_MDIO("<--xgbe_adjust_link\n");
+}
+
+void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
+ int i;
+
+ dev_alert(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_alert(dev, "MMD Device Mask = %#x\n",
+ phydev->c45_ids.devices_in_package);
+ for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
+ dev_alert(dev, " MMD %d: ID = %#08x\n", i,
+ phydev->c45_ids.device_ids[i]);
+
+ dev_alert(dev, "\n*************************************************\n");
+}
+
+int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct device_node *phy_node;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_mdio_register\n");
+
+ /* Retrieve the phy-handle */
+ phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(pdata->dev, "unable to parse phy-handle\n");
+ return -EINVAL;
+ }
+
+ /* Register with the MDIO bus */
+ mii = mdiobus_alloc();
+ if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ ret = -ENOMEM;
+ goto err_node_get;
+ }
+
+ /* Register on the MDIO bus (don't probe any PHYs) */
+ mii->name = XGBE_PHY_NAME;
+ mii->read = xgbe_mdio_read;
+ mii->write = xgbe_mdio_write;
+ snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
+ mii->priv = pdata;
+ mii->phy_mask = ~0;
+ mii->parent = pdata->dev;
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ goto err_mdiobus_alloc;
+ }
+ DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
+
+ /* Probe the PCS using Clause 45 */
+ phydev = get_phy_device(mii, XGBE_PRTAD, true);
+ if (IS_ERR(phydev) || !phydev ||
+ !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
+ dev_err(pdata->dev, "get_phy_device failed\n");
+ ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
+ goto err_mdiobus_register;
+ }
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+
+ of_node_get(phy_node);
+ phydev->dev.of_node = phy_node;
+ ret = phy_device_register(phydev);
+ if (ret) {
+ dev_err(pdata->dev, "phy_device_register failed\n");
+ of_node_put(phy_node);
+ goto err_phy_device;
+ }
+
+ /* Add a reference to the PHY driver so it can't be unloaded */
+ pdata->phy_module = phydev->dev.driver ?
+ phydev->dev.driver->owner : NULL;
+ if (!try_module_get(pdata->phy_module)) {
+ dev_err(pdata->dev, "try_module_get failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ pdata->mii = mii;
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_tx_pause = pdata->tx_pause;
+ pdata->phy_rx_pause = pdata->rx_pause;
+
+ ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
+ pdata->phy_mode);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct failed\n");
+ goto err_phy_device;
+ }
+
+ if (!phydev->drv || (phydev->drv->phy_id == 0)) {
+ netdev_err(netdev, "phy_id not valid\n");
+ ret = -ENODEV;
+ goto err_phy_connect;
+ }
+ DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
+ dev_name(&phydev->dev), phydev->link);
+
+ phydev->autoneg = pdata->default_autoneg;
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* Add settings needed to force speed */
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+ phydev->supported |= SUPPORTED_10000baseT_Full;
+
+ phydev->speed = pdata->default_speed;
+ phydev->duplex = DUPLEX_FULL;
+
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+ }
+
+ pdata->phydev = phydev;
+
+ of_node_put(phy_node);
+
+ DBGPHY_REGS(pdata);
+
+ DBGPR("<--xgbe_mdio_register\n");
+
+ return 0;
+
+err_phy_connect:
+ phy_disconnect(phydev);
+
+err_phy_device:
+ phy_device_free(phydev);
+
+err_mdiobus_register:
+ mdiobus_unregister(mii);
+
+err_mdiobus_alloc:
+ mdiobus_free(mii);
+
+err_node_get:
+ of_node_put(phy_node);
+
+ return ret;
+}
+
+void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_mdio_unregister\n");
+
+ phy_disconnect(pdata->phydev);
+ pdata->phydev = NULL;
+
+ module_put(pdata->phy_module);
+ pdata->phy_module = NULL;
+
+ mdiobus_unregister(pdata->mii);
+ pdata->mii->priv = NULL;
+
+ mdiobus_free(pdata->mii);
+ pdata->mii = NULL;
+
+ DBGPR("<--xgbe_mdio_unregister\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000000..ab0627162c01
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,676 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+
+
+#define XGBE_DRV_NAME "amd-xgbe"
+#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define TX_DESC_CNT 512
+#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
+#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
+#define RX_DESC_CNT 512
+
+#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define RX_BUF_ALIGN 64
+
+#define XGBE_MAX_DMA_CHANNELS 16
+#define DMA_ARDOMAIN_SETTING 0x2
+#define DMA_ARCACHE_SETTING 0xb
+#define DMA_AWDOMAIN_SETTING 0x2
+#define DMA_AWCACHE_SETTING 0x7
+#define DMA_INTERRUPT_MASK 0x31c7
+
+#define XGMAC_MIN_PACKET 60
+#define XGMAC_STD_PACKET_MTU 1500
+#define XGMAC_MAX_STD_PACKET 1518
+#define XGMAC_JUMBO_PACKET_MTU 9000
+#define XGMAC_MAX_JUMBO_PACKET 9018
+
+#define MAX_MULTICAST_LIST 14
+#define TX_FLAGS_IP_PKT 0x00000001
+#define TX_FLAGS_TCP_PKT 0x00000002
+
+/* MDIO bus phy name */
+#define XGBE_PHY_NAME "amd_xgbe_phy"
+#define XGBE_PRTAD 0
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT 1
+#define XGMAC_IOCTL_CONTEXT 2
+
+#define FIFO_SIZE_B(x) (x)
+#define FIFO_SIZE_KB(x) ((x) * 1024)
+
+#define XGBE_TC_CNT 2
+
+/* Helper macro for descriptor handling
+ * Always use GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be masked
+ * with (descriptor count - 1) of the ring to index the
+ * proper descriptor data.
+ */
+#define GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
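To make the wrap concrete: with the default rdesc_count of 512, a free-running index of 515 masks to 515 & 511 == 3, so GET_DESC_DATA(ring, 515) and GET_DESC_DATA(ring, 3) resolve to the same xgbe_ring_data entry, and the cur/dirty counters can increment indefinitely without an explicit modulo or reset.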
+
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS 100
+#define XGMAC_INIT_DMA_TX_FRAMES 16
+
+#define XGMAC_MAX_DMA_RIWT 0xff
+#define XGMAC_INIT_DMA_RX_USECS 100
+#define XGMAC_INIT_DMA_RX_FRAMES 16
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ unsigned int rdesc_count;
+ unsigned int length;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+ unsigned int desc0;
+ unsigned int desc1;
+ unsigned int desc2;
+ unsigned int desc3;
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+ struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
+ dma_addr_t rdesc_dma; /* DMA address of descriptor */
+
+ struct sk_buff *skb; /* Virtual address of SKB */
+ dma_addr_t skb_dma; /* DMA address of SKB data */
+ unsigned int skb_dma_len; /* Length of SKB DMA area */
+ unsigned int tso_header; /* TSO header indicator */
+
+ unsigned short len; /* Length of received Rx packet */
+
+ unsigned int interrupt; /* Interrupt indicator */
+
+ unsigned int mapped_as_page;
+};
+
+struct xgbe_ring {
+ /* Ring lock - used just for TX rings at the moment */
+ spinlock_t lock;
+
+ /* Per packet related information */
+ struct xgbe_packet_data packet_data;
+
+ /* Virtual/DMA addresses and count of allocated descriptor memory */
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int rdesc_count;
+
+ /* Array of descriptor data corresponding to the descriptor memory
+ * (always use the GET_DESC_DATA macro to access this data)
+ */
+ struct xgbe_ring_data *rdata;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: count of descriptors in which a packet has been received
+ * (used with skb_realloc_index to refresh the ring)
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int queue_stopped;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+
+ struct {
+ unsigned int realloc_index;
+ unsigned int realloc_threshold;
+ } rx;
+ };
+} ____cacheline_aligned;
+
+/* Structure used to describe the descriptor rings associated with
+ * a DMA channel.
+ */
+struct xgbe_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xgbe_prv_data *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct hrtimer tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+} ____cacheline_aligned;
+
+enum xgbe_int {
+ XGMAC_INT_DMA_ISR_DC0IS,
+ XGMAC_INT_DMA_CH_SR_TI,
+ XGMAC_INT_DMA_CH_SR_TPS,
+ XGMAC_INT_DMA_CH_SR_TBU,
+ XGMAC_INT_DMA_CH_SR_RI,
+ XGMAC_INT_DMA_CH_SR_RBU,
+ XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_FBE,
+ XGMAC_INT_DMA_ALL,
+};
+
+enum xgbe_int_state {
+ XGMAC_INT_STATE_SAVE,
+ XGMAC_INT_STATE_RESTORE,
+};
+
+enum xgbe_mtl_fifo_size {
+ XGMAC_MTL_FIFO_SIZE_256 = 0x00,
+ XGMAC_MTL_FIFO_SIZE_512 = 0x01,
+ XGMAC_MTL_FIFO_SIZE_1K = 0x03,
+ XGMAC_MTL_FIFO_SIZE_2K = 0x07,
+ XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
+ XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
+ XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
+ XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
+ XGMAC_MTL_FIFO_SIZE_64K = 0xff,
+ XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
+ XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
+};
+
+struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* Rx Stats */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+};
+
+struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+ int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+
+ int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+ int (*set_gmii_speed)(struct xgbe_prv_data *);
+ int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
+ int (*set_xgmii_speed)(struct xgbe_prv_data *);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+ void (*enable_rx)(struct xgbe_prv_data *);
+ void (*disable_rx)(struct xgbe_prv_data *);
+
+ void (*powerup_tx)(struct xgbe_prv_data *);
+ void (*powerdown_tx)(struct xgbe_prv_data *);
+ void (*powerup_rx)(struct xgbe_prv_data *);
+ void (*powerdown_rx)(struct xgbe_prv_data *);
+
+ int (*init)(struct xgbe_prv_data *);
+ int (*exit)(struct xgbe_prv_data *);
+
+ int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
+ int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
+ void (*pre_xmit)(struct xgbe_channel *);
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_reset)(struct xgbe_ring_data *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct xgbe_prv_data *);
+ int (*config_rx_flow_control)(struct xgbe_prv_data *);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xgbe_prv_data *);
+ int (*config_tx_coalesce)(struct xgbe_prv_data *);
+ unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
+ unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_pblx8)(struct xgbe_prv_data *);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+ void (*read_mmc_stats)(struct xgbe_prv_data *);
+};
+
+struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ void (*realloc_skb)(struct xgbe_channel *);
+ void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xgbe_hw_features {
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advanced Timestamping High Word */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ struct device *dev;
+
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing mutex */
+ struct mutex xpcs_mutex;
+
+ int irq_number;
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_desc_if desc_if;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_frames;
+
+ /* Current MTU */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* MDIO settings */
+ struct module *phy_module;
+ char *mii_bus_id;
+ struct mii_bus *mii;
+ int mdio_mmd;
+ struct phy_device *phydev;
+ int default_autoneg;
+ int default_speed;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+ unsigned int phy_tx_pause;
+ unsigned int phy_rx_pause;
+
+ /* Netdev related settings */
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+
+ /* System clock value used for Rx watchdog */
+ struct clk *sysclock;
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+ /* Device restart work structure */
+ struct work_struct restart_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
+#endif
+};
+
+/* Function prototypes */
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
+struct net_device_ops *xgbe_get_netdev_ops(void);
+struct ethtool_ops *xgbe_get_ethtool_ops(void);
+
+int xgbe_mdio_register(struct xgbe_prv_data *);
+void xgbe_mdio_unregister(struct xgbe_prv_data *);
+void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
+ unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+ unsigned int);
+void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
+void xgbe_get_all_hw_features(struct xgbe_prv_data *);
+int xgbe_powerup(struct net_device *, unsigned int);
+int xgbe_powerdown(struct net_device *, unsigned int);
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+
+#ifdef CONFIG_DEBUG_FS
+void xgbe_debugfs_init(struct xgbe_prv_data *);
+void xgbe_debugfs_exit(struct xgbe_prv_data *);
+#else
+static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_DESC_DUMP
+#define XGMAC_ENABLE_RX_DESC_DUMP
+#endif
+
+/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_PKT_DUMP
+#define XGMAC_ENABLE_RX_PKT_DUMP
+#endif
+
+/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+#if 0
+#define YDEBUG
+#define YDEBUG_MDIO
+#endif
+
+/* For debug prints */
+#ifdef YDEBUG
+#define DBGPR(x...) pr_alert(x)
+#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
+#else
+#define DBGPR(x...) do { } while (0)
+#define DBGPHY_REGS(x...) do { } while (0)
+#endif
+
+#ifdef YDEBUG_MDIO
+#define DBGPR_MDIO(x...) pr_alert(x)
+#else
+#define DBGPR_MDIO(x...) do { } while (0)
+#endif
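The empty do { } while (0) bodies are the standard kernel idiom for a compiled-out statement macro: unlike a bare empty expansion, the macro plus the caller's semicolon still forms exactly one statement, which keeps unbraced if/else constructs well-formed. A small illustration (recover() is a hypothetical helper):

	if (err)
		DBGPR("error path\n");	/* still one statement when compiled out */
	else
		recover();		/* binds to the if as intended */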
+
+#endif
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d647a7d115ac..18e2faccebb0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -13,6 +13,7 @@
* Vineet Gupta
*/
+#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void arc_emac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ arc_emac_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* arc_emac_open - Open the network device.
* @ndev: Pointer to the network device.
@@ -451,6 +461,41 @@ static int arc_emac_open(struct net_device *ndev)
}
/**
+ * arc_emac_set_rx_mode - Change the receive filtering mode.
+ * @ndev: Pointer to the network device.
+ *
+ * This function enables/disables promiscuous or all-multicast mode
+ * and updates the multicast filtering list of the network device.
+ */
+static void arc_emac_set_rx_mode(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+
+ if (ndev->flags & IFF_PROMISC) {
+ arc_reg_or(priv, R_CTRL, PROM_MASK);
+ } else {
+ arc_reg_clr(priv, R_CTRL, PROM_MASK);
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ arc_reg_set(priv, R_LAFL, ~0);
+ arc_reg_set(priv, R_LAFH, ~0);
+ } else {
+ struct netdev_hw_addr *ha;
+ unsigned int filter[2] = { 0, 0 };
+ int bit;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+ filter[bit >> 5] |= 1 << (bit & 31);
+ }
+
+ arc_reg_set(priv, R_LAFL, filter[0]);
+ arc_reg_set(priv, R_LAFH, filter[1]);
+ }
+ }
+}
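The hash arithmetic above takes the top 6 bits of the little-endian CRC-32 of each multicast address and uses them to select one of the 64 filter bits spread across the two 32-bit LAFL/LAFH registers. For a hypothetical address whose CRC selects bit 37: filter[37 >> 5] is filter[1] (LAFH), and the bit set is 1 << (37 & 31), i.e. 1 << 5.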
+
+/**
* arc_emac_stop - Close the network device.
* @ndev: Pointer to the network device.
*
@@ -620,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_start_xmit = arc_emac_tx,
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
+ .ndo_set_rx_mode = arc_emac_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = arc_emac_poll_controller,
+#endif
};
static int arc_emac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 17bb9ce96260..49faa97a30c3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->netdev_ops = &alx_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+ netdev->ethtool_ops = &alx_ethtool_ops;
netdev->irq = pdev->irq;
netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 859ea844ba0f..48694c239d5c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -56,8 +56,8 @@ static int atl1c_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
void atl1c_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+ netdev->ethtool_ops = &atl1c_ethtool_ops;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 82b23861bf55..1be072f4afc2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -57,8 +57,8 @@ static int atl1e_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
void atl1e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+ netdev->ethtool_ops = &atl1e_ethtool_ops;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index dfd0e91fa726..b460db7919a2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3258,8 +3258,8 @@ static int atl1_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 78befb522a52..6746bd717146 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+ netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -1769,8 +1769,8 @@ static int atl2_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
In case of using this driver on BCM4706 it also requires enabling
BCMA_DRIVER_GMAC_CMN to make it work.
+config SYSTEMPORT
+ tristate "Broadcom SYSTEMPORT internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if SYSTEMPORT=y
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset using an internal
+ Ethernet switch.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 05ba62589017..ca5a20a48b14 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
netif_napi_add(dev, &bp->napi, b44_poll, 64);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->irq = sdev->irq;
- SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+ dev->ethtool_ops = &b44_ethtool_ops;
err = ssb_bus_powerup(sdev->bus, 0);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..3e8d1a88ed3d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
};
-#define BCM_ENET_STATS_LEN \
- (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
static const u32 unused_mib_regs[] = {
ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
dev->netdev_ops = &bcm_enet_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
- SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+ dev->ethtool_ops = &bcm_enet_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
- SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+ dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..141160ef249a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1654 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = __raw_readl(priv->base + offset + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
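For clarity, each BCM_SYSPORT_IO_MACRO() invocation stamps out a read/write accessor pair bound to one register block; the umac instance, for example, expands to:

	static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
	{
		u32 reg = __raw_readl(priv->base + SYS_PORT_UMAC_OFFSET + off);
		return reg;
	}
	static inline void umac_writel(struct bcm_sysport_priv *priv,
				       u32 val, u32 off)
	{
		__raw_writel(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
	}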
+
+/* L2-interrupt masking/unmasking helpers; they automatically save the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
+ */
+#define BCM_SYSPORT_INTR_L2(which) \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
+} \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
+ priv->irq##which##_mask |= (mask); \
+} \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
+
+/* Register accesses to GISB/RBUS registers are expensive (a few hundred
+ * nanoseconds), so keep the 64-bit check explicit here to save one
+ * register write per packet on 32-bit platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+ d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+ __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
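upper_32_bits() and lower_32_bits() (from linux/kernel.h) split the dma_addr_t across the two descriptor words; when phys_addr_t is 32 bits the high-word write is compiled out entirely. For example, with addr == 0x123456780ULL:

	lower_32_bits(addr) == 0x23456780
	upper_32_bits(addr) == 0x00000001	/* then masked with DESC_ADDR_HI_MASK */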
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+ struct dma_desc *desc,
+ unsigned int port)
+{
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+ tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ if (priv->rx_csum_en)
+ reg |= RXCHK_EN;
+ else
+ reg &= ~RXCHK_EN;
+
+ /* If UniMAC forwards CRC, we need to skip over it to get
+ * a valid CHK bit to be set in the per-packet status word
+ */
+ if (priv->rx_csum_en && priv->crc_fwd)
+ reg |= RXCHK_SKIP_FCS;
+ else
+ reg &= ~RXCHK_SKIP_FCS;
+
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Hardware transmit checksum requires us to enable the Transmit status
+ * block prepended to the packet contents
+ */
+ priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (priv->tsb_en)
+ reg |= TSB_EN;
+ else
+ reg &= ~TSB_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & NETIF_F_RXCSUM)
+ ret = bcm_sysport_set_rx_csum(dev, wanted);
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+ return ret;
+}
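The features ^ dev->features XOR is the conventional way to compute exactly which feature bits toggled, so each checksum block is reprogrammed only when its own bits change. A sketch of the pattern (the helper name is hypothetical):

	netdev_features_t changed = wanted ^ dev->features;	/* 1-bits == toggled */

	if (changed & NETIF_F_RXCSUM)
		reconfigure_rx_checksum_offload();	/* hypothetical helper */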
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_MIB_RX("rx_control", mib.rx.cf),
+ STAT_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_MIB_RX("rx_align", mib.rx.aln),
+ STAT_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_MIB_RX("rx_code", mib.rx.cde),
+ STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_MIB_TX("tx_control", mib.tx.cf),
+ STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* RXCHK misc statistics */
+ STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+ STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+ RXCHK_OTHER_DISC_CNTR),
+ /* RBUF misc statistics */
+ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+ STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, "0.1", sizeof(info->version));
+ strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_SYSPORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_sysport_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ switch (s->type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ continue;
+ case BCM_SYSPORT_STAT_MIB_RX:
+ case BCM_SYSPORT_STAT_MIB_TX:
+ case BCM_SYSPORT_STAT_RUNT:
+ if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+ offset = UMAC_MIB_STAT_OFFSET;
+ val = umac_readl(priv, UMAC_MIB_START + j + offset);
+ break;
+ case BCM_SYSPORT_STAT_RXCHK:
+ val = rxchk_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rxchk_writel(priv, 0, s->reg_offset);
+ break;
+ case BCM_SYSPORT_STAT_RBUF:
+ val = rbuf_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rbuf_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+
+ netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcm_sysport_update_mib_counters(priv);
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ if (s->type == BCM_SYSPORT_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ dma_addr_t mapping;
+ int ret;
+
+ cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!cb->skb) {
+ netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(kdev, cb->skb->data,
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcm_sysport_free_cb(cb);
+ netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
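+	/* num_rx_bds is a power of two, so the AND below simply wraps
+	 * the assign index around the ring
+	 */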
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DESC_SIZE);
+
+ netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+ return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+ unsigned int budget)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ unsigned int processed = 0, to_process;
+ struct bcm_sysport_cb *cb;
+ struct sk_buff *skb;
+ unsigned int p_index;
+ u16 len, status;
+ struct bcm_rsb *rsb;
+
+ /* Determine how much we should process since last call */
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ p_index &= RDMA_PROD_INDEX_MASK;
+
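+	/* The hardware producer index is 16-bit and wraps around, so
+	 * account for a wrap when computing how much work is pending
+	 */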
+ if (p_index < priv->rx_c_index)
+ to_process = (RDMA_CONS_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ to_process = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p_index=%d rx_c_index=%d to_process=%d\n",
+ p_index, priv->rx_c_index, to_process);
+
+ while ((processed < to_process) &&
+ (processed < budget)) {
+
+		cb = &priv->rx_cbs[priv->rx_read_ptr];
+		skb = cb->skb;
+
+		processed++;
+		priv->rx_read_ptr++;
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
+
+		/* We do not have a backing SKB, and therefore no DMA
+		 * mapping, since bcm_sysport_rx_refill always allocates
+		 * both or neither; bail out before touching skb->data
+		 */
+		if (unlikely(!skb)) {
+			netif_err(priv, rx_err, ndev, "out of memory!\n");
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
+			goto refill;
+		}
+
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+		/* Extract the Receive Status Block prepended */
+		rsb = (struct bcm_rsb *)skb->data;
+		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+			DESC_STATUS_MASK;
+
+		netif_dbg(priv, rx_status, ndev,
+			"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+			p_index, priv->rx_c_index, priv->rx_read_ptr,
+			len, status);
+
+ if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+ netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+ netif_err(priv, rx_err, ndev, "error packet\n");
+ if (status & RX_STATUS_OVFLOW)
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ skb_put(skb, len);
+
+ /* Hardware validated our checksum */
+ if (likely(status & DESC_L4_CSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		/* Hardware prepends the Receive Status Block plus two
+		 * padding bytes in front of the Ethernet header; strip
+		 * all of this from the SKB.
+		 */
+ skb_pull(skb, sizeof(*rsb) + 2);
+ len -= (sizeof(*rsb) + 2);
+
+ /* UniMAC may forward CRC */
+ if (priv->crc_fwd) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ napi_gro_receive(&priv->napi, skb);
+refill:
+ bcm_sysport_rx_refill(priv, cb);
+ }
+
+ return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb,
+ unsigned int *bytes_compl,
+ unsigned int *pkts_compl)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+
+ if (cb->skb) {
+ ndev->stats.tx_bytes += cb->skb->len;
+ *bytes_compl += cb->skb->len;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ ndev->stats.tx_packets++;
+ (*pkts_compl)++;
+ bcm_sysport_free_cb(cb);
+ /* SKB fragment */
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ struct net_device *ndev = priv->netdev;
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ u32 hw_ind;
+
+ txq = netdev_get_tx_queue(ndev, ring->index);
+
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+ ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+ last_c_index = ring->c_index;
+ num_tx_cbs = ring->size;
+
+ c_index &= (num_tx_cbs - 1);
+
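+	/* Compute how many descriptors completed since the last call,
+	 * accounting for the consumer index wrapping around the ring
+	 */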
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+ ring->index, c_index, last_tx_cn, last_c_index);
+
+ while (last_tx_cn-- > 0) {
+ cb = ring->cbs + last_c_index;
+ bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+ last_c_index++;
+ last_c_index &= (num_tx_cbs - 1);
+ }
+
+ ring->c_index = c_index;
+
+ if (netif_tx_queue_stopped(txq) && pkts_compl)
+ netif_tx_wake_queue(txq);
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+ return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned int released;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_tx_ring *ring =
+ container_of(napi, struct bcm_sysport_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable TX interrupt */
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ }
+
+ return work_done;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++)
+ bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_priv *priv =
+ container_of(napi, struct bcm_sysport_priv, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_desc_rx(priv, budget);
+
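+	/* Advance our consumer index and report it back to the RDMA
+	 * engine so the completed buffers can be reused
+	 */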
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable RX interrupts */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+ }
+
+ return work_done;
+}
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq0_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious RX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ /* disable RX interrupts */
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* TX ring is full, perform a full reclaim since we do not know
+ * which one would trigger this interrupt
+ */
+ if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+ bcm_sysport_tx_reclaim_all(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring;
+
+ priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq1_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious TX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ if (!(priv->irq1_stat & BIT(ring)))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_1_mask_set(priv, BIT(ring));
+ __napi_schedule(&txr->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
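+/* Insert the Transmit Status Block; this may reallocate the SKB, so the
+ * caller must use the returned pointer (NULL on allocation failure)
+ */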
+static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
+					      struct net_device *dev)
+{
+ struct sk_buff *nskb;
+ struct bcm_tsb *tsb;
+ u32 csum_info;
+ u8 ip_proto;
+ u16 csum_start;
+ u16 ip_ver;
+
+ /* Re-allocate SKB if needed */
+ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+ nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+ dev_kfree_skb(skb);
+ if (!nskb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+			return NULL;
+ }
+ skb = nskb;
+ }
+
+ tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
+ /* Zero-out TSB by default */
+ memset(tsb, 0, sizeof(*tsb));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		ip_ver = ntohs(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+			return skb;
+ }
+
+ /* Get the checksum offset and the L4 (transport) offset */
+ csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+ csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+ csum_info |= (csum_start << L4_PTR_SHIFT);
+
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ csum_info |= L4_LENGTH_VALID;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ csum_info |= L4_UDP;
+ } else
+ csum_info = 0;
+
+ tsb->l4_ptr_dest_map = csum_info;
+ }
+
+	return skb;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ struct dma_desc *desc;
+ unsigned int skb_len;
+ unsigned long flags;
+ dma_addr_t mapping;
+ u32 len_status;
+ u16 queue;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue);
+ ring = &priv->tx_rings[queue];
+
+ /* lock against tx reclaim in BH context and TX ring full interrupt */
+ spin_lock_irqsave(&ring->lock, flags);
+ if (unlikely(ring->desc_count == 0)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "queue %d awake and ring full!\n", queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Insert TSB and checksum infos */
+ if (priv->tsb_en) {
+		skb = bcm_sysport_insert_tsb(skb, dev);
+		if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 68 bytes
+ * (including FCS and tag) because the length verification is done after
+ * the Broadcom tag is stripped off the ingress packet.
+ */
+ if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+ ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb_len);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Remember the SKB for future freeing */
+ cb = &ring->cbs[ring->curr_desc];
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, skb_len);
+
+ /* Fetch a descriptor entry from our pool */
+ desc = ring->desc_cpu;
+
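+	/* Pack the 40-bit address, the length and the status bits into
+	 * the two descriptor words
+	 */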
+ desc->addr_lo = lower_32_bits(mapping);
+ len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+ len_status |= (skb_len << DESC_LEN_SHIFT);
+ len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+ DESC_STATUS_SHIFT;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+ ring->curr_desc++;
+ if (ring->curr_desc == ring->size)
+ ring->curr_desc = 0;
+ ring->desc_count--;
+
+ /* Ensure write completion of the descriptor status/length
+ * in DRAM before the System Port WRITE_PORT register latches
+ * the value
+ */
+ wmb();
+ desc->addr_status_len = len_status;
+ wmb();
+
+ /* Write this descriptor address to the RING write port */
+ tdma_port_write_desc_addr(priv, desc, ring->index);
+
+ /* Check ring space and update SW control flow */
+ if (ring->desc_count == 0)
+ netif_tx_stop_queue(txq);
+
+ netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+ ring->index, ring->desc_count, ring->curr_desc);
+
+ ret = NETDEV_TX_OK;
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+ netdev_warn(dev, "transmit timeout!\n");
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned int changed = 0;
+ u32 cmd_bits = 0, reg;
+
+ if (priv->old_link != phydev->link) {
+ changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ if (changed) {
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+ CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ phy_print_status(priv->phydev);
+ }
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ size_t size;
+ void *p;
+ u32 reg;
+
+ /* Simple descriptors partitioning for now */
+ size = 256;
+
+ /* We just need one DMA descriptor which is DMA-able, since writing to
+ * the port will allocate a new descriptor in its internal linked-list
+ */
+	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc),
+				&ring->desc_dma, GFP_KERNEL);
+ if (!p) {
+ netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+ if (!ring->cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize SW view of the ring */
+ spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+ ring->curr_desc = 0;
+
+ /* Initialize HW ring */
+ tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+ tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+ tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+	/* Program the number of descriptors as MAX_THRESHOLD and a
+	 * hysteresis trigger of one descriptor
+	 */
+ tdma_writel(priv, ring->size |
+ 1 << RING_HYST_THRESH_SHIFT,
+ TDMA_DESC_RING_MAX_HYST(index));
+
+ /* Enable the ring queue in the arbiter */
+ reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+ reg |= (1 << index);
+ tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+ napi_enable(&ring->napi);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "TDMA cfg, size=%d, desc_cpu=%p\n",
+ ring->size, ring->desc_cpu);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ u32 reg;
+
+ /* Caller should stop the TDMA engine */
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!(reg & TDMA_DISABLED))
+ netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ bcm_sysport_tx_reclaim(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+
+ if (ring->desc_dma) {
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
+ ring->desc_dma = 0;
+ }
+ ring->size = 0;
+ ring->alloc_size = 0;
+
+ netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = rdma_readl(priv, RDMA_CONTROL);
+ if (enable)
+ reg |= RDMA_EN;
+ else
+ reg &= ~RDMA_EN;
+ rdma_writel(priv, reg, RDMA_CONTROL);
+
+	/* Poll for RDMA disabling completion */
+ do {
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!!(reg & RDMA_DISABLED) == !enable)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (enable)
+ reg |= TDMA_EN;
+ else
+ reg &= ~TDMA_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+	/* Poll for TDMA disabling completion */
+ do {
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!!(reg & TDMA_DISABLED) == !enable)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+ u32 reg;
+ int ret;
+
+ /* Initialize SW view of the RX ring */
+ priv->num_rx_bds = NUM_RX_DESC;
+ priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds *
+ sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!priv->rx_cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = bcm_sysport_alloc_rx_bufs(priv);
+ if (ret) {
+ netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+ return ret;
+ }
+
+ /* Initialize HW, ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ rdma_enable_set(priv, 0);
+
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+ rdma_writel(priv, 0, RDMA_PROD_INDEX);
+ rdma_writel(priv, 0, RDMA_CONS_INDEX);
+ rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+ RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+ /* Operate the queue in ring mode */
+ rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+ rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+ rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+ rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+ rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+ priv->num_rx_bds, priv->rx_bds);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ unsigned int i;
+ u32 reg;
+
+ /* Caller should ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+ if (dma_unmap_addr(cb, dma_addr))
+ dma_unmap_single(&priv->pdev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ bcm_sysport_free_cb(cb);
+ }
+
+ kfree(priv->rx_cbs);
+ priv->rx_cbs = NULL;
+
+ netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC)
+ reg |= CMD_PROMISC;
+ else
+ reg &= ~CMD_PROMISC;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* No support for ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= CMD_RX_EN | CMD_TX_EN;
+ else
+ reg &= ~(CMD_RX_EN | CMD_TX_EN);
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* UniMAC stops on a packet boundary, wait for a full-sized packet
+ * to be processed (1 msec).
+ */
+ if (enable == 0)
+ usleep_range(1000, 2000);
+}
+
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+ unsigned int timeout = 0;
+ u32 reg;
+ int ret = 0;
+
+ umac_writel(priv, 0, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ break;
+
+ udelay(1);
+ }
+
+	if (timeout > 1000) {
+ dev_err(&priv->pdev->dev,
+ "timeout waiting for MAC to come out of reset\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+ unsigned char *addr)
+{
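+	/* UMAC_MAC0 holds the four most significant address bytes,
+	 * UMAC_MAC1 the remaining two
+	 */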
+ umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
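+	/* Assert the RX and TX flush controls, give the hardware 1 ms
+	 * to drain, then de-assert them
+	 */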
+ topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+ topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+ mdelay(1);
+ topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+ topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* Reset UniMAC */
+ ret = umac_reset(priv);
+ if (ret) {
+ netdev_err(dev, "UniMAC reset failed\n");
+ return ret;
+ }
+
+ /* Flush TX and RX FIFOs at TOPCTRL level */
+ topctrl_flush(priv);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, 0);
+
+	/* Enable RBUF 2-byte alignment and the Receive Status Block */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Set maximum frame length */
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* Set MAC address */
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ /* Read CRC forward */
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+ priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+ if (!priv->phydev) {
+ netdev_err(dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Reset house keeping link status */
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+ priv->old_pause = -1;
+
+ /* mask all interrupts and request them */
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request RX interrupt\n");
+ goto out_phy_disconnect;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
+
+ /* Initialize both hardware and software ring */
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ret = bcm_sysport_init_tx_ring(priv, i);
+ if (ret) {
+ netdev_err(dev, "failed to initialize TX ring %d\n",
+ i);
+ goto out_free_tx_ring;
+ }
+ }
+
+ /* Initialize linked-list */
+ tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+ /* Initialize RX ring */
+ ret = bcm_sysport_init_rx_ring(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize RX ring\n");
+ goto out_free_rx_ring;
+ }
+
+ /* Turn on RDMA */
+ ret = rdma_enable_set(priv, 1);
+ if (ret)
+ goto out_free_rx_ring;
+
+ /* Enable RX interrupt and TX ring full interrupt */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+ /* Turn on TDMA */
+ ret = tdma_enable_set(priv, 1);
+ if (ret)
+ goto out_clear_rx_int;
+
+ /* Enable NAPI */
+ napi_enable(&priv->napi);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(priv, 1);
+
+ phy_start(priv->phydev);
+
+ /* Enable TX interrupts for the 32 TXQs */
+ intrl2_1_mask_clear(priv, 0xffffffff);
+
+ /* Last call before we start the real business */
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+out_clear_rx_int:
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+ bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ free_irq(priv->irq1, dev);
+out_free_irq0:
+ free_irq(priv->irq0, dev);
+out_phy_disconnect:
+ phy_disconnect(priv->phydev);
+ return ret;
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* stop all software from updating hardware */
+ netif_tx_stop_all_queues(dev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ /* mask all interrupts */
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ /* Disable UniMAC RX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ ret = tdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling RDMA\n");
+ return ret;
+ }
+
+ /* Wait for a maximum packet size to be drained */
+ usleep_range(2000, 3000);
+
+ ret = rdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling TDMA\n");
+ return ret;
+ }
+
+ /* Disable UniMAC TX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* Free RX/TX rings SW structures */
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ bcm_sysport_fini_rx_ring(priv);
+
+ free_irq(priv->irq0, dev);
+ free_irq(priv->irq1, dev);
+
+ /* Disconnect from PHY */
+ phy_disconnect(priv->phydev);
+
+ return 0;
+}
+
+static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .get_settings = bcm_sysport_get_settings,
+ .set_settings = bcm_sysport_set_settings,
+ .get_drvinfo = bcm_sysport_get_drvinfo,
+ .get_msglevel = bcm_sysport_get_msglvl,
+ .set_msglevel = bcm_sysport_set_msglvl,
+ .get_link = ethtool_op_get_link,
+ .get_strings = bcm_sysport_get_strings,
+ .get_ethtool_stats = bcm_sysport_get_stats,
+ .get_sset_count = bcm_sysport_get_sset_count,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+ .ndo_start_xmit = bcm_sysport_xmit,
+ .ndo_tx_timeout = bcm_sysport_tx_timeout,
+ .ndo_open = bcm_sysport_open,
+ .ndo_stop = bcm_sysport_stop,
+ .ndo_set_features = bcm_sysport_set_features,
+ .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT "v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+ struct bcm_sysport_priv *priv;
+ struct device_node *dn;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ u32 txq, rxq;
+ int ret;
+
+ dn = pdev->dev.of_node;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Read the Transmit/Receive Queue properties */
+ if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+ txq = TDMA_NUM_RINGS;
+ if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+ rxq = 1;
+
+ dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+ if (!dev)
+ return -ENOMEM;
+
+ /* Initialize private members */
+ priv = netdev_priv(dev);
+
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ dev_err(&pdev->dev, "invalid interrupts\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto err;
+ }
+
+ priv->netdev = dev;
+ priv->pdev = pdev;
+
+ priv->phy_interface = of_get_phy_mode(dn);
+ /* Default to GMII interface mode */
+ if (priv->phy_interface < 0)
+ priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register fixed PHY\n");
+ goto err;
+ }
+
+ priv->phy_dn = dn;
+ }
+
+ /* Initialize netdevice members */
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ random_ether_addr(dev->dev_addr);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ dev->ethtool_ops = &bcm_sysport_ethtool_ops;
+ dev->netdev_ops = &bcm_sysport_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+ /* HW supported features, none enabled by default */
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
+ dev->needed_headroom += sizeof(struct bcm_tsb);
+
+ /* We are interfaced to a switch which handles the multicast
+ * filtering for us, so we do not support programming any
+ * multicast hash table in this Ethernet MAC.
+ */
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* libphy will adjust the link state accordingly */
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register net_device\n");
+ goto err;
+ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+ "Broadcom SYSTEMPORT" REV_FMT
+ " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+ priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+ return 0;
+err:
+ free_netdev(dev);
+ return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+ /* Not much to do, ndo_close has been called
+ * and we use managed allocations
+ */
+ unregister_netdev(dev);
+ free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemport-v1.00" },
+ { .compatible = "brcm,systemport" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm_sysport_driver = {
+ .probe = bcm_sysport_probe,
+ .remove = bcm_sysport_remove,
+ .driver = {
+ .name = "brcm-systemport",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_sysport_of_match,
+ },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..281c08246037
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,678 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN 0x00
+#define DESC_ADDR_HI_SHIFT 0
+#define DESC_ADDR_HI_MASK 0xff
+#define DESC_STATUS_SHIFT 8
+#define DESC_STATUS_MASK 0x3ff
+#define DESC_LEN_SHIFT 18
+#define DESC_LEN_MASK 0x7fff
+#define DESC_ADDR_LO 0x04
+
+/* HW supports 40-bit addressing, hence the upper 8 address bits
+ * share a descriptor word with the status and length fields
+ */
+#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH 2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526,
+ * padded by ENET_PAD(10) to 1536, a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN 4
+#define ENET_PAD 10
+#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct bcm_tsb {
+ u32 pcp_dei_vid;
+#define PCP_DEI_MASK 0xf
+#define VID_SHIFT 4
+#define VID_MASK 0xfff
+ u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK 0x1ff
+#define L4_PTR_SHIFT 9
+#define L4_PTR_MASK 0x1ff
+#define L4_UDP (1 << 18)
+#define L4_LENGTH_VALID (1 << 19)
+#define DEST_MAP_SHIFT 20
+#define DEST_MAP_MASK 0x1ff
+};
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct bcm_rsb {
+ u32 rx_status_len;
+ u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM (1 << 7)
+#define DESC_SOP (1 << 8)
+#define DESC_EOP (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST 0
+#define RX_STATUS_BCAST 0x04
+#define RX_STATUS_MCAST 0x08
+#define RX_STATUS_L2_MCAST 0x0c
+#define RX_STATUS_ERR (1 << 4)
+#define RX_STATUS_OVFLOW (1 << 5)
+#define RX_STATUS_PARSE_FAIL (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT 0x00
+#define TX_STATUS_VLAN_PCP_TSB 0x01
+#define TX_STATUS_VLAN_QUEUE 0x02
+#define TX_STATUS_VLAN_VID_TSB 0x03
+#define TX_STATUS_OWR_CRC (1 << 2)
+#define TX_STATUS_APP_CRC (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT 0
+#define TX_STATUS_BRCM_TAG_ZERO 0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
+#define TX_STATUS_SKIP_BYTES (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET 0
+#define REV_CNTL 0x00
+#define REV_MASK 0xffff
+
+#define RX_FLUSH_CNTL 0x04
+#define RX_FLUSH (1 << 0)
+
+#define TX_FLUSH_CNTL 0x08
+#define TX_FLUSH (1 << 0)
+
+#define MISC_CNTL 0x0c
+#define SYS_CLK_SEL (1 << 0)
+#define TDMA_EOP_SEL (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET 0x200
+#define SYS_PORT_INTRL2_1_OFFSET 0x240
+#define INTRL2_CPU_STATUS 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0c
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
+#define INTRL2_0_MPD (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
+#define INTRL2_0_RDMA_MBDONE (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
+#define INTRL2_0_TX_RING_FULL (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET 0x300
+
+#define RXCHK_CONTROL 0x00
+#define RXCHK_EN (1 << 0)
+#define RXCHK_SKIP_FCS (1 << 1)
+#define RXCHK_BAD_CSUM_DIS (1 << 2)
+#define RXCHK_BRCM_TAG_EN (1 << 3)
+#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
+#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
+#define RXCHK_PARSE_TNL (1 << 12)
+#define RXCHK_VIOL_EN (1 << 13)
+#define RXCHK_VIOL_DIS (1 << 14)
+#define RXCHK_INCOM_PKT (1 << 15)
+#define RXCHK_V6_DUPEXT_EN (1 << 16)
+#define RXCHK_V6_DUPEXT_DIS (1 << 17)
+#define RXCHK_ETHERTYPE_DIS (1 << 18)
+#define RXCHK_L2_HDR_DIS (1 << 19)
+#define RXCHK_L3_HDR_DIS (1 << 20)
+#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
+#define RXCHK_PARSE_AUTH (1 << 22)
+
+#define RXCHK_BRCM_TAG0 0x04
+#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK 0x24
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
+#define RXCHK_ETHERTYPE 0x48
+#define RXCHK_BAD_CSUM_CNTR 0x4C
+#define RXCHK_OTHER_DISC_CNTR 0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET 0x380
+#define TXCHK_PKT_RDY_THRESH 0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET 0x400
+
+#define RBUF_CONTROL 0x00
+#define RBUF_RSB_EN (1 << 0)
+#define RBUF_4B_ALGN (1 << 1)
+#define RBUF_BRCM_TAG_STRIP (1 << 2)
+#define RBUF_BAD_PKT_DISC (1 << 3)
+#define RBUF_RESUME_THRESH_SHIFT 4
+#define RBUF_RESUME_THRESH_MASK 0xff
+#define RBUF_OK_TO_SEND_SHIFT 12
+#define RBUF_OK_TO_SEND_MASK 0xff
+#define RBUF_CRC_REPLACE (1 << 20)
+#define RBUF_OK_TO_SEND_MODE (1 << 21)
+#define RBUF_RSB_SWAP (1 << 22)
+#define RBUF_ACPI_EN (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH 0x04
+
+#define RBUF_STATUS 0x08
+#define RBUF_WOL_MODE (1 << 0)
+#define RBUF_MPD (1 << 1)
+#define RBUF_ACPI (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR 0x0c
+#define RBUF_ERR_PKT_CNTR 0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET 0x600
+
+#define TBUF_CONTROL 0x00
+#define TBUF_BP_EN (1 << 0)
+#define TBUF_MAX_PKT_THRESH_SHIFT 1
+#define TBUF_MAX_PKT_THRESH_MASK 0x1f
+#define TBUF_FULL_THRESH_SHIFT 8
+#define TBUF_FULL_THRESH_MASK 0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET 0x800
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+/* There is a 0xC gap between the end of the RX stats and the beginning
+ * of the TX stats, and again between the end of the TX stats and the
+ * beginning of the RX RUNT stats
+ */
+#define UMAC_MIB_STAT_OFFSET 0xc
+
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RX_CNT_RST (1 << 0)
+#define MIB_RUNT_CNT_RST (1 << 1)
+#define MIB_TX_CNT_RST (1 << 2)
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET 0x2000
+
+#define RDMA_CONTROL 0x1000
+#define RDMA_EN (1 << 0)
+#define RDMA_RING_CFG (1 << 1)
+#define RDMA_DISC_EN (1 << 2)
+#define RDMA_BUF_DATA_OFFSET_SHIFT 4
+#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
+
+#define RDMA_STATUS 0x1004
+#define RDMA_DISABLED (1 << 0)
+#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
+#define RDMA_BP_STATUS (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE 0x1008
+
+#define RDMA_RING_BUF_SIZE 0x100c
+#define RDMA_RING_SIZE_SHIFT 16
+
+#define RDMA_WRITE_PTR_HI 0x1010
+#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_PROD_INDEX 0x1018
+#define RDMA_PROD_INDEX_MASK 0xffff
+
+#define RDMA_CONS_INDEX 0x101c
+#define RDMA_CONS_INDEX_MASK 0xffff
+
+#define RDMA_START_ADDR_HI 0x1020
+#define RDMA_START_ADDR_LO 0x1024
+#define RDMA_END_ADDR_HI 0x1028
+#define RDMA_END_ADDR_LO 0x102c
+
+#define RDMA_MBDONE_INTR 0x1030
+#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_TIMEOUT_SHIFT 16
+#define RDMA_TIMEOUT_MASK 0xffff
+
+#define RDMA_XON_XOFF_THRESH 0x1034
+#define RDMA_XON_XOFF_THRESH_MASK 0xffff
+#define RDMA_XOFF_THRESH_SHIFT 16
+
+#define RDMA_READ_PTR_HI 0x1038
+#define RDMA_READ_PTR_LO 0x103c
+
+#define RDMA_OVERRIDE 0x1040
+#define RDMA_LE_MODE (1 << 0)
+#define RDMA_REG_MODE (1 << 1)
+
+#define RDMA_TEST 0x1044
+#define RDMA_TP_OUT_SEL (1 << 0)
+#define RDMA_MEM_SEL (1 << 1)
+
+#define RDMA_DEBUG 0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS 32 /* rings = queues */
+#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bits words */
+
+#define SYS_PORT_TDMA_OFFSET 0x4000
+#define TDMA_WRITE_PORT_OFFSET 0x0000
+#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
+ (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
+ (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relatives to a specific ring number */
+#define RING_HEAD_TAIL_PTR 0x00
+#define RING_HEAD_MASK 0x7ff
+#define RING_TAIL_SHIFT 11
+#define RING_TAIL_MASK 0x7ff
+#define RING_FLUSH (1 << 24)
+#define RING_EN (1 << 25)
+
+#define RING_COUNT 0x04
+#define RING_COUNT_MASK 0x7ff
+#define RING_BUFF_DONE_SHIFT 11
+#define RING_BUFF_DONE_MASK 0x7ff
+
+#define RING_MAX_HYST 0x08
+#define RING_MAX_THRESH_MASK 0x7ff
+#define RING_HYST_THRESH_SHIFT 11
+#define RING_HYST_THRESH_MASK 0x7ff
+
+#define RING_INTR_CONTROL 0x0c
+#define RING_INTR_THRESH_MASK 0x7ff
+#define RING_EMPTY_INTR_EN (1 << 15)
+#define RING_TIMEOUT_SHIFT 16
+#define RING_TIMEOUT_MASK 0xffff
+
+#define RING_PROD_CONS_INDEX 0x10
+#define RING_PROD_INDEX_MASK 0xffff
+#define RING_CONS_INDEX_SHIFT 16
+#define RING_CONS_INDEX_MASK 0xffff
+
+#define RING_MAPPING 0x14
+#define RING_QID_MASK 0x3
+#define RING_PORT_ID_SHIFT 3
+#define RING_PORT_ID_MASK 0x7
+#define RING_IGNORE_STATUS (1 << 6)
+#define RING_FAILOVER_EN (1 << 7)
+#define RING_CREDIT_SHIFT 8
+#define RING_CREDIT_MASK 0xffff
+
+#define RING_PCP_DEI_VID 0x18
+#define RING_VID_MASK 0x7ff
+#define RING_DEI (1 << 12)
+#define RING_PCP_SHIFT 13
+#define RING_PCP_MASK 0x7
+#define RING_PKT_SIZE_ADJ_SHIFT 16
+#define RING_PKT_SIZE_ADJ_MASK 0xf
+
+#define TDMA_DESC_RING_SIZE 28
+
+/* Definition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
+ ((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+ (TDMA_DESC_RING_BASE(i) + \
+ RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL 0x600
+#define TDMA_EN (1 << 0)
+#define TSB_EN (1 << 1)
+#define TSB_SWAP (1 << 2)
+#define ACB_ALGO (1 << 3)
+#define BUF_DATA_OFFSET_SHIFT 4
+#define BUF_DATA_OFFSET_MASK 0x3ff
+#define VLAN_EN (1 << 14)
+#define SW_BRCM_TAG (1 << 15)
+#define WNC_KPT_SIZE_UPDATE (1 << 16)
+#define SYNC_PKT_SIZE (1 << 17)
+#define ACH_TXDONE_DELAY_SHIFT 18
+#define ACH_TXDONE_DELAY_MASK 0xff
+
+#define TDMA_STATUS 0x604
+#define TDMA_DISABLED (1 << 0)
+#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE 0x608
+#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS 0x610
+#define TDMA_TPID 0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
+#define TDMA_FREE_HEAD_MASK 0x7ff
+#define TDMA_FREE_TAIL_SHIFT 11
+#define TDMA_FREE_TAIL_MASK 0x7ff
+
+#define TDMA_FREE_LIST_COUNT 0x61c
+#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
+
+#define TDMA_TIER2_ARB_CTRL 0x620
+#define TDMA_ARB_MODE_RR 0
+#define TDMA_ARB_MODE_WEIGHT_RR 0x1
+#define TDMA_ARB_MODE_STRICT 0x2
+#define TDMA_ARB_MODE_DEFICIT_RR 0x3
+#define TDMA_CREDIT_SHIFT 4
+#define TDMA_CREDIT_MASK 0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL 0x624
+#define TDMA_ARB_EN (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
+#define TDMA_TIER1_ARB_1_CTRL 0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
+#define TDMA_TIER1_ARB_2_CTRL 0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
+#define TDMA_TIER1_ARB_3_CTRL 0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
+#define TDMA_LE_MODE (1 << 0)
+#define TDMA_REG_MODE (1 << 1)
+
+#define TDMA_TEST 0x648
+#define TDMA_TP_OUT_SEL (1 << 0)
+#define TDMA_MEM_TM (1 << 1)
+
+#define TDMA_DEBUG 0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+ u32 addr_status_len;
+ u32 addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS 1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC 1536
+
+#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group */
+struct bcm_sysport_pkt_counters {
+	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */
+	u32	cnt_127;	/* RO Rx/Tx 65-127 bytes packet */
+	u32	cnt_255;	/* RO Rx/Tx 128-255 bytes packet */
+	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */
+	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */
+	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */
+	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */
+	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet */
+	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet */
+	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet */
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+	u32	pkts;		/* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+ struct bcm_sysport_rx_counters rx;
+ struct bcm_sysport_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rxchk_bad_csum;
+ u32 rxchk_other_pkt_disc;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+ BCM_SYSPORT_STAT_NETDEV = -1,
+ BCM_SYSPORT_STAT_MIB_RX,
+ BCM_SYSPORT_STAT_MIB_TX,
+ BCM_SYSPORT_STAT_RUNT,
+ BCM_SYSPORT_STAT_RXCHK,
+ BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RXCHK, \
+ .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RBUF, \
+ .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcm_sysport_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+/* Software house keeping helper structure */
+struct bcm_sysport_cb {
+ struct sk_buff *skb; /* SKB for RX packets */
+ void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+ spinlock_t lock; /* Ring lock for tx reclaim/xmit */
+ struct napi_struct napi; /* NAPI per tx queue */
+ dma_addr_t desc_dma; /* DMA cookie */
+ unsigned int index; /* Ring index */
+ unsigned int size; /* Ring current size */
+ unsigned int alloc_size; /* Ring one-time allocated size */
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+ unsigned int p_index; /* Current producer index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+ void __iomem *base;
+ u32 irq0_stat;
+ u32 irq0_mask;
+ u32 irq1_stat;
+ u32 irq1_mask;
+ struct napi_struct napi ____cacheline_aligned;
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ int irq0;
+ int irq1;
+
+ /* Transmit rings */
+ struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+ /* Receive queue */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ unsigned int rx_bd_assign_index;
+ struct bcm_sysport_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* PHY device */
+ struct device_node *phy_dn;
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ /* Misc fields */
+ unsigned int rx_csum_en:1;
+ unsigned int tsb_en:1;
+ unsigned int crc_fwd:1;
+ u16 rev;
+
+ /* MIB related fields */
+ struct bcm_sysport_mib mib;
+
+ /* Ethtool */
+ u32 msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0297a79a38e1..05c6af6c418f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
return -ENOMEM;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->irq = core->irq;
- SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+ net_dev->ethtool_ops = &bgmac_ethtool_ops;
bgmac = netdev_priv(net_dev);
bgmac->net_dev = net_dev;
bgmac->core = core;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0ab83708b6a1..67d2b0047371 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6916,8 +6916,8 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
}
else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
spin_unlock_bh(&bp->phy_lock);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4d8f8aba0ea5..4cab09d3f807 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dd57c7c5a3da..47c5814114e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
+ /* A rmb() is required to ensure that the CQE is not read
+ * before it is written by the adapter DMA. PCI ordering
+ * rules will make sure the other fields are written before
+ * the marker at the end of struct eth_fast_path_rx_cqe
+ * but without rmb() a weakly ordered processor can process
+ * stale data. Without the barrier the TPA state-machine might
+ * enter an inconsistent state and the kernel stack might be
+ * provided with an incorrect packet description - these lead
+ * to various kernel crashes.
+ */
+ rmb();
+
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
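The barrier added above is the consumer-side half of the standard DMA descriptor-ring protocol: read the producer index that the device publishes last, then fence, then read the entry's fields. A generic sketch of that pattern (hw_prod_idx, ring, ring_mask and handle_cqe are illustrative names, not the exact bnx2x fast-path code):

	prod = le16_to_cpu(*hw_prod_idx);	/* index the NIC writes last */
	while (cons != prod) {
		struct eth_fast_path_rx_cqe *cqe = &ring[cons & ring_mask];

		rmb();			/* no CQE field reads before this point */
		handle_cqe(cqe->type_error_flags, cqe);
		cons++;
	}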
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3448cc033ca5..571427c7226b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 97ea5421dd96..51a952c51cb1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 804b8f64463e..c6939ecb02c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index b6de05e3149b..bd0600cf7266 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
- * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * The same as in bnx2x_get_rxfh: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
* while indir->ring_index points to an array of u32.
*
@@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3498,16 +3499,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- if (IS_PF(bp))
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
- else /* vf */
- SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+ netdev->ethtool_ops = (IS_PF(bp)) ?
+ &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae164fce..8aafd9b5d6a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c2dfea7968f4..bd90e50bd8e6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd900960..5669ed2e87d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9b6b3d7304b6..53fb4fa61b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2218,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
*/
u32 val;
struct bnx2x *bp = params->bp;
- int bnx2x_status = 0;
u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2232,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
- return bnx2x_status;
+ return 0;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
@@ -2246,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
- return bnx2x_status;
+ return 0;
}
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2260,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
- return bnx2x_status;
+ return 0;
}
static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3703,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 lane, i, cl72_ctrl, an_adv = 0;
+ u16 lane, i, cl72_ctrl, an_adv = 0, val;
+ u32 wc_lane_config;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3822,15 +3822,27 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Enable Auto-Detect to support 1G over CL37 as well */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
-
+ wc_lane_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
/* Force cl48 sync_status LOW to avoid getting stuck in CL73
* parallel-detect loop when CL73 and CL37 are enabled.
*/
- CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0);
+ val |= 1 << 11;
+
+ /* Restore Polarity settings in case it was run over by
+ * previous link owner
+ */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
- bnx2x_set_aer_mmd(params, phy);
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
bnx2x_disable_kr2(params, vars, phy);
}
@@ -6473,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
static int bnx2x_link_initialize(struct link_params *params,
struct link_vars *vars)
{
- int rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
/* In case of external phy existence, the line speed would be the
@@ -6546,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- return rc;
+ return 0;
}
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -12461,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
u32 dont_clear_stat, lfa_sts;
struct bnx2x *bp = params->bp;
+ bnx2x_set_mdio_emac_per_phy(bp, params);
/* Sync the link parameters */
bnx2x_link_status_update(params, vars);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3a8e51ed5bec..2887034523e0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -10053,6 +10053,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+ /* UNDI marks its presence in DORQ -
+ * it initializes CID offset for normal bell to 0x7
+ */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+ return false;
+
+ if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ return true;
+ }
+
+ return false;
+}
+
static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
{
u8 major, minor, version;
@@ -10302,6 +10320,10 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Path is unmarked\n");
+ /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+ if (bnx2x_prev_is_after_undi(bp))
+ goto out;
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
@@ -10322,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Could not FLR\n");
+out:
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
if (!rc)
@@ -10360,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* close LLH filters towards the BRB */
bnx2x_set_rx_filter(&bp->link_params, 0);
- /* Check if the UNDI driver was previously loaded
- * UNDI driver initializes CID offset for normal bell to 0x7
- */
- if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
- if (tmp_reg == 0x7) {
- BNX2X_DEV_INFO("UNDI previously loaded\n");
- prev_undi = true;
- /* clear the UNDI indication */
- REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
- /* clear possible idle check errors */
- REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
- }
+ /* Check if the UNDI driver was previously loaded */
+ if (bnx2x_prev_is_after_undi(bp)) {
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
if (!CHIP_IS_E1x(bp))
/* block FW from writing to host */
@@ -13283,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
- cancel_delayed_work(&bp->sp_task);
- cancel_delayed_work(&bp->period_task);
+ cancel_delayed_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->period_task);
spin_lock_bh(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d725317c4277..b1936044767a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 80f6c790ed88..718ecd294661 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index faf01488d26e..eda8583f6fc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*
*/
#include "bnx2x.h"
@@ -1071,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the VF doorbell threshold */
- REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+ /* set the VF doorbell threshold. This threshold represents the number
+ * of doorbells allowed in the main DORQ FIFO for a specific VF.
+ */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
@@ -2576,7 +2578,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->vf = vfidx;
ivi->qos = 0;
- ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->min_tx_rate = 0;
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 6929adba52f9..96c575e147a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef BNX2X_SRIOV_H
#define BNX2X_SRIOV_H
@@ -571,7 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return NULL;
}
-static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; }
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 3b75070411aa..ca47665f94bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index f35845006cdd..2beceaefdeea 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 784c7155b98a..d712d0ddd719 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#include "bnx2x.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index c922b81170e5..e21e706762c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4dd48d2fa804..8244e2b14bb4 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
+
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_unlock(&cnic_lock);
- if (ulp_type == CNIC_ULP_ISCSI)
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
- else if (ulp_type == CNIC_ULP_FCOE)
+ if (ulp_type == CNIC_ULP_FCOE)
dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev;
- read_lock(&cnic_dev_lock);
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
if (__cnic_alloc_uio_rings(udev, pages)) {
udev->dev = NULL;
- read_unlock(&cnic_dev_lock);
return -ENOMEM;
}
cp->udev = udev;
- read_unlock(&cnic_dev_lock);
return 0;
}
}
- read_unlock(&cnic_dev_lock);
udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
if (__cnic_alloc_uio_rings(udev, pages))
goto err_udev;
- write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
- write_unlock(&cnic_dev_lock);
pci_dev_get(udev->pdev);
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
{
int if_type;
- rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
void *ctx;
- ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
- if (!ulp_ops || !ulp_ops->indicate_netevent)
+ mutex_lock(&cnic_lock);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
+ if (!ulp_ops || !ulp_ops->indicate_netevent) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
ctx = cp->ulp_handle[if_type];
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
+
ulp_ops->indicate_netevent(ctx, event, vlan_id);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
- rcu_read_unlock();
}
/* netdev event handler */
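Pieced together, the new scheme in cnic_rcv_netevent trades RCU (atomic context, so the handler could not block) for a short mutex-protected window plus a per-type "call pending" bit: the callback then runs unlocked and may sleep, while the pending bit keeps the ops from being torn down mid-call. Condensed from the hunk above:

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops || !ulp_ops->indicate_netevent) {
		mutex_unlock(&cnic_lock);
		continue;
	}
	ctx = cp->ulp_handle[if_type];
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	ulp_ops->indicate_netevent(ctx, event, vlan_id);	/* may sleep */

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);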
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd04375f..5ba1cfbd60da 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
dev->watchdog_timeo = 2 * HZ;
- SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4608673beaff..add8d8596084 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
static int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
unsigned int phy_flags;
int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
return 0;
}
- if (priv->phy_dn)
- phydev = of_phy_connect(dev, priv->phy_dn,
- bcmgenet_mii_setup, 0,
- priv->phy_interface);
- else
- phydev = of_phy_connect_fixed_link(dev,
- bcmgenet_mii_setup,
- priv->phy_interface);
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = dn;
+ }
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
+ priv->phy_interface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e5d95c5ce1ad..8afa579e7c40 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2013 Broadcom Corporation.
+ * Copyright (C) 2005-2014 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 136
+#define TG3_MIN_NUM 137
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jan 03, 2014"
+#define DRV_MODULE_RELDATE "May 11, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
return 0;
}
-#define NVRAM_CMD_TIMEOUT 10000
+#define NVRAM_CMD_TIMEOUT 5000
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
@@ -3232,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
tw32(NVRAM_CMD, nvram_cmd);
for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
- udelay(10);
+ usleep_range(10, 40);
if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
udelay(10);
break;
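With both tweaks the worst-case wait stays comparable (5000 iterations of at least 10us) but the CPU now yields between polls; since usleep_range() can sleep, this also documents that NVRAM commands are only issued from process context. Condensed shape of the resulting loop (the timeout return in the unchanged tail of the function is assumed here):

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);	/* sleeping poll, not a busy udelay */
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}
	return i == NVRAM_CMD_TIMEOUT ? -EBUSY : 0;	/* assumed tail */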
@@ -7854,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
netif_wake_queue(tp->dev);
}
- segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
- if (IS_ERR(segs))
+ segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+ if (IS_ERR(segs) || !segs)
goto tg3_tso_bug_end;
do {
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
return NETDEV_TX_OK;
}
-/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
- * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
- */
+/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tg3_napi *tnapi;
struct netdev_queue *txq;
unsigned int last;
+ struct iphdr *iph = NULL;
+ struct tcphdr *tcph = NULL;
+ __sum16 tcp_csum = 0, ip_csum = 0;
+ __be16 ip_tot_len = 0;
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_shinfo(skb)->gso_size;
if (mss) {
- struct iphdr *iph;
u32 tcp_opt_len, hdr_len;
if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
if (!skb_is_gso_v6(skb)) {
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+ tg3_flag(tp, TSO_BUG))
+ return tg3_tso_bug(tp, skb);
+
+ ip_csum = iph->check;
+ ip_tot_len = iph->tot_len;
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
}
- if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- tg3_flag(tp, TSO_BUG))
- return tg3_tso_bug(tp, skb);
-
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
+ tcph = tcp_hdr(skb);
+ tcp_csum = tcph->check;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3)) {
- tcp_hdr(skb)->check = 0;
+ tcph->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
- } else
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ } else {
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
if (tg3_flag(tp, HW_TSO_3)) {
mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (would_hit_hwbug) {
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ if (mss) {
+ /* If it's a TSO packet, do GSO instead of
+ * allocating and copying to a large linear SKB
+ */
+ if (ip_tot_len) {
+ iph->check = ip_csum;
+ iph->tot_len = ip_tot_len;
+ }
+ tcph->check = tcp_csum;
+ return tg3_tso_bug(tp, skb);
+ }
+
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
- int ret;
+ int ret, cpmu_restore = 0;
u8 *pd;
- u32 i, offset, len, b_offset, b_count;
+ u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
__be32 val;
if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
eeprom->magic = TG3_EEPROM_MAGIC;
+ /* Override clock, link aware and link idle modes */
+ if (tg3_flag(tp, CPMU_PRESENT)) {
+ cpmu_val = tr32(TG3_CPMU_CTRL);
+ if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
+ CPMU_CTRL_LINK_IDLE_MODE)) {
+ tw32(TG3_CPMU_CTRL, cpmu_val &
+ ~(CPMU_CTRL_LINK_AWARE_MODE |
+ CPMU_CTRL_LINK_IDLE_MODE));
+ cpmu_restore = 1;
+ }
+ }
+ tg3_override_clk(tp);
+
if (offset & 3) {
/* adjustments to start on required 4 byte boundary */
b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
}
ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
if (ret)
- return ret;
+ goto eeprom_done;
memcpy(data, ((char *)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
for (i = 0; i < (len - (len & 3)); i += 4) {
ret = tg3_nvram_read_be32(tp, offset + i, &val);
if (ret) {
+ if (i)
+ i -= 4;
eeprom->len += i;
- return ret;
+ goto eeprom_done;
}
memcpy(pd + i, &val, 4);
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ eeprom->len += i;
+ ret = -EINTR;
+ goto eeprom_done;
+ }
+ cond_resched();
+ }
}
eeprom->len += i;
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
b_offset = offset + len - b_count;
ret = tg3_nvram_read_be32(tp, b_offset, &val);
if (ret)
- return ret;
+ goto eeprom_done;
memcpy(pd, &val, b_count);
eeprom->len += b_count;
}
- return 0;
+ ret = 0;
+
+eeprom_done:
+ /* Restore clock, link aware and link idle modes */
+ tg3_restore_clk(tp);
+ if (cpmu_restore)
+ tw32(TG3_CPMU_CTRL, cpmu_val);
+
+ return ret;
}
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
@@ -12484,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct tg3 *tp = netdev_priv(dev);
int i;
@@ -12495,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -14027,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_sset_count = tg3_get_sset_count,
.get_rxnfc = tg3_get_rxnfc,
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
- .get_rxfh_indir = tg3_get_rxfh_indir,
- .set_rxfh_indir = tg3_set_rxfh_indir,
+ .get_rxfh = tg3_get_rxfh,
+ .set_rxfh = tg3_set_rxfh,
.get_channels = tg3_get_channels,
.set_channels = tg3_set_channels,
.get_ts_info = tg3_get_ts_info,
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 04321e5a356e..461accaf0aa4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2013 Broadcom Corporation.
+ * Copyright (C) 2007-2014 Broadcom Corporation.
*/
#ifndef _T3_H
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index f9e150825bb5..882cad71ad62 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -266,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->transceiver = XCVR_EXTERNAL;
cmd->maxtxpkt = 0;
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+ netdev->ethtool_ops = &bnad_ethtool_ops;
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 521dfea44b83..25d6b2a10e4e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
ether_setup(ndev);
ndev->netdev_ops = &xgmac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ ndev->ethtool_ops = &xgmac_ethtool_ops;
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 05613a85ce61..186566bfdbc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -580,8 +580,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
- SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+ netdev->ethtool_ops = &t1_ethtool_ops;
}
if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 07bbb711b7e5..5d9cce053cc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1809,8 +1809,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index c0a9dd55f4e5..b0cbb2b7fd48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
+ dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
} else if (netif_is_bond_slave(dev)) {
struct net_device *upper_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
+ MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
+ MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
- MAX_EGRQ = 128, /* max # of egress queues, including FLs */
- MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
+ INGQ_EXTRAS = 2, /* firmware event queue and */
+ /* forwarded interrupts */
+ MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+ + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
+ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
struct adapter;
@@ -538,6 +544,7 @@ struct sge {
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+ struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active offload queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
+ u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 ofld_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[NCHAN];
+ u16 rdma_ciq[NCHAN];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
struct l2t_data;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..a83271cf17c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
for_each_rdmarxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
adap->port[0]->name, i);
+
+ for_each_rdmaciq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
+ adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
+ for_each_rdmaciq(s, rdmaciqqidx) {
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
+ &s->rdmaciq[rdmaciqqidx].rspq);
+ if (err)
+ goto unwind;
+ msi_index++;
+ }
return 0;
unwind:
+ while (--rdmaciqqidx >= 0)
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->rdmaciq[rdmaciqqidx].rspq);
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
+ for_each_rdmaciq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
- &q->fl, uldrx_handler);
+ q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, &q->fl, uldrx_handler);
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
s->rdma_rxq[i] = q->rspq.abs_id;
}
+ for_each_rdmaciq(s, i) {
+ struct sge_ofld_rxq *q = &s->rdmaciq[i];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->rdma_ciq[i] = q->rspq.abs_id;
+ }
+
for_each_port(adap, i) {
/*
* Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2252,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
cmd->port = PORT_DA;
else
- cmd->port = PORT_FIBRE;
+ cmd->port = PORT_OTHER;
} else
cmd->port = PORT_OTHER;
@@ -2461,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
}
/**
- * set_rxq_intr_params - set a queue's interrupt holdoff parameters
- * @adap: the adapter
+ * set_rspq_intr_params - set a queue's interrupt holdoff parameters
* @q: the Rx queue
* @us: the hold-off time in us, or 0 to disable timer
* @cnt: the hold-off packet count, or 0 to disable counter
@@ -2470,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
* Sets an Rx queue's interrupt hold-off time and packet count. At least
* one of the two needs to be enabled for the queue to generate interrupts.
*/
-static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
- unsigned int us, unsigned int cnt)
+static int set_rspq_intr_params(struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
{
+ struct adapter *adap = q->adap;
+
if ((us | cnt) == 0)
cnt = 1;
@@ -2499,24 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
return 0;
}
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+/**
+ * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+ unsigned int us, unsigned int cnt)
{
- const struct port_info *pi = netdev_priv(dev);
+ int i, err;
+ struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
- int i;
- int r = 0;
-
- for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
- q = &adap->sge.ethrxq[i].rspq;
- r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
- c->rx_max_coalesced_frames);
- if (r) {
- dev_err(&dev->dev, "failed to set coalesce %d\n", r);
- break;
- }
+ struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = 0; i < pi->nqsets; i++, q++) {
+ err = set_rspq_intr_params(&q->rspq, us, cnt);
+ if (err)
+ return err;
}
- return r;
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
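For reference, the per-queue hold-off pair programmed here is the same one exposed through the standard ethtool coalescing interface, so after this patch a command such as

	ethtool -C eth0 rx-usecs 5 rx-frames 10

(interface name illustrative) arms every response queue of the port with a 5us timer and a 10-packet counter, whichever fires first.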
@@ -2732,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p)
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
@@ -2742,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p)
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p)
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@@ -2844,8 +2897,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
- .get_rxfh_indir = get_rss_table,
- .set_rxfh_indir = set_rss_table,
+ .get_rxfh = get_rss_table,
+ .set_rxfh = set_rss_table,
.flash_device = set_flash,
};
@@ -3386,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
EXPORT_SYMBOL(cxgb4_best_mtu);
/**
+ * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
+ * @mtus: the HW MTU table
+ * @header_size: Header Size
+ * @data_size_max: maximum Data Segment Size
+ * @data_size_align: desired Data Segment Size Alignment (2^N)
+ * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
+ *
+ * Similar to cxgb4_best_mtu() but instead of searching the Hardware
+ * MTU Table based solely on a Maximum MTU parameter, we break that
+ * parameter up into a Header Size and Maximum Data Segment Size, and
+ * provide a desired Data Segment Size Alignment. If we find an MTU in
+ * the Hardware MTU Table which will result in a Data Segment Size with
+ * the requested alignment _and_ that MTU isn't "too far" from the
+ * closest MTU, then we'll return that rather than the closest MTU.
+ */
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+ unsigned short header_size,
+ unsigned short data_size_max,
+ unsigned short data_size_align,
+ unsigned int *mtu_idxp)
+{
+ unsigned short max_mtu = header_size + data_size_max;
+ unsigned short data_size_align_mask = data_size_align - 1;
+ int mtu_idx, aligned_mtu_idx;
+
+ /* Scan the MTU Table till we find an MTU which is larger than our
+ * Maximum MTU or we reach the end of the table. Along the way,
+ * record the last MTU found, if any, which will result in a Data
+ * Segment Length matching the requested alignment.
+ */
+ for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
+ unsigned short data_size = mtus[mtu_idx] - header_size;
+
+ /* If this MTU minus the Header Size would result in a
+ * Data Segment Size of the desired alignment, remember it.
+ */
+ if ((data_size & data_size_align_mask) == 0)
+ aligned_mtu_idx = mtu_idx;
+
+ /* If we're not at the end of the Hardware MTU Table and the
+ * next element is larger than our Maximum MTU, drop out of
+ * the loop.
+ */
+ if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
+ break;
+ }
+
+ /* If we fell out of the loop because we ran to the end of the table,
+ * then we just have to use the last [largest] entry.
+ */
+ if (mtu_idx == NMTUS)
+ mtu_idx--;
+
+ /* If we found an MTU which resulted in the requested Data Segment
+ * Length alignment and that's "not far" from the largest MTU which is
+ * less than or equal to the maximum MTU, then use that.
+ */
+ if (aligned_mtu_idx >= 0 &&
+ mtu_idx - aligned_mtu_idx <= 1)
+ mtu_idx = aligned_mtu_idx;
+
+ /* If the caller has passed in an MTU Index pointer, pass the
+ * MTU Index back. Return the MTU value.
+ */
+ if (mtu_idxp)
+ *mtu_idxp = mtu_idx;
+ return mtus[mtu_idx];
+}
+EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
+
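A hedged usage sketch for the new export, as a ULD might call it when choosing a send size (the 40-byte header and 512-byte alignment are illustrative values; lldi is assumed to be the ULD's struct cxgb4_lld_info):

	unsigned int mtu_idx, mtu;

	/* 40 bytes of IP+TCP headers, at most 8192 bytes of payload,
	 * payload size aligned to 512 bytes if a nearby MTU allows it
	 */
	mtu = cxgb4_best_aligned_mtu(lldi->mtus, 40, 8192, 512, &mtu_idx);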
+/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
@@ -3782,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.mtus = adap->params.mtus;
if (uld == CXGB4_ULD_RDMA) {
lli.rxq_ids = adap->sge.rdma_rxq;
+ lli.ciq_ids = adap->sge.rdma_ciq;
lli.nrxq = adap->sge.rdmaqs;
+ lli.nciq = adap->sge.rdmaciqs;
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.ofld_rxq;
lli.nrxq = adap->sge.ofldqsets;
@@ -3931,22 +4057,19 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
EXPORT_SYMBOL(cxgb4_unregister_uld);
/* Check if netdev on which the event occurred belongs to us or not. Return
- * suceess (1) if it belongs otherwise failure (0).
+ * success (true) if it belongs otherwise failure (false).
+ * Called with rcu_read_lock() held.
*/
-static int cxgb4_netdev(struct net_device *netdev)
+static bool cxgb4_netdev(const struct net_device *netdev)
{
struct adapter *adap;
int i;
- spin_lock(&adap_rcu_lock);
list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
for (i = 0; i < MAX_NPORTS; i++)
- if (adap->port[i] == netdev) {
- spin_unlock(&adap_rcu_lock);
- return 1;
- }
- spin_unlock(&adap_rcu_lock);
- return 0;
+ if (adap->port[i] == netdev)
+ return true;
+ return false;
}
static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
@@ -4061,7 +4184,7 @@ static int update_root_dev_clip(struct net_device *dev)
/* Parse all bond and vlan devices layered on top of the physical dev */
for (i = 0; i < VLAN_N_VID; i++) {
- root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+ root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
if (!root_dev)
continue;
@@ -5528,13 +5651,41 @@ static int adap_init0(struct adapter *adap)
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
- /*
- * These are finalized by FW initialization, load their values now.
+ /* The MTU/MSS Table is initialized by now, so load its values. If
+ * we're initializing the adapter, then we'll make any modifications
+ * we want to the MTU/MSS Table and also initialize the congestion
+ * parameters.
*/
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
- t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
- adap->params.b_wnd);
+ if (state != DEV_STATE_INIT) {
+ int i;
+ /* The default MTU Table contains values 1492 and 1500.
+ * However, for TCP, it's better to have two values which are
+ * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+ * This allows us to have a TCP Data Payload which is a
+ * multiple of 8 regardless of what combination of TCP Options
+ * are in use (always a multiple of 4 bytes) which is
+ * important for performance reasons. For instance, if no
+ * options are in use, then we have a 20-byte IP header and a
+ * 20-byte TCP header. In this case, a 1500-byte MSS would
+ * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+ * which is not a multiple of 8. So using an MSS of 1488 in
+ * this case results in a TCP Data Payload of 1448 bytes which
+ * is a multiple of 8. On the other hand, if 12-byte TCP Time
+ * Stamps have been negotiated, then an MTU of 1500 bytes
+ * results in a TCP Data Payload of 1448 bytes which, as
+ * above, is a multiple of 8 bytes ...
+ */
+ for (i = 0; i < NMTUS; i++)
+ if (adap->params.mtus[i] == 1492) {
+ adap->params.mtus[i] = 1488;
+ break;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ }
t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
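The 1488-versus-1500 reasoning in the comment above is easy to sanity-check; a throwaway userspace check of the arithmetic (not driver code):

	#include <assert.h>

	int main(void)
	{
		assert((1500 - 40) % 8 == 4);	/* MSS 1460: not a multiple of 8 */
		assert((1488 - 40) % 8 == 0);	/* MSS 1448: multiple of 8 */
		assert((1500 - 52) % 8 == 0);	/* 12B timestamps: 1448 again */
		return 0;
	}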
@@ -5669,12 +5820,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
-static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
- q->intr_params = QINTR_TIMER_IDX(timer_idx) |
- (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
- q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+ q->adap = adap;
+ set_rspq_intr_params(q, us, cnt);
q->iqe_len = iqe_size;
q->size = size;
}
@@ -5688,6 +5839,7 @@ static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
+ int ciq_size;
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5726,12 +5878,13 @@ static void cfg_queues(struct adapter *adap)
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
+ s->rdmaciqs = adap->params.nports;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
- init_rspq(&r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
r->fl.size = 72;
}
@@ -5747,7 +5900,7 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
struct sge_ofld_rxq *r = &s->ofldrxq[i];
- init_rspq(&r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
r->fl.size = 72;
}
@@ -5755,13 +5908,26 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
- init_rspq(&r->rspq, 0, 0, 511, 64);
+ init_rspq(adap, &r->rspq, 5, 1, 511, 64);
r->rspq.uld = CXGB4_ULD_RDMA;
r->fl.size = 72;
}
- init_rspq(&s->fw_evtq, 6, 0, 512, 64);
- init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ CH_WARN(adap, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
+ struct sge_ofld_rxq *r = &s->rdmaciq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+ r->rspq.uld = CXGB4_ULD_RDMA;
+ }
+
+ init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
/*
@@ -5808,9 +5974,9 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->ofldqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
/* need nchan for each possible ULD */
- ofld_need = 2 * nchan;
+ ofld_need = 3 * nchan;
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
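Worked vector budget, assuming an illustrative 4-channel, 4-port offload adapter: the concentrator IQs add s->rdmaciqs (= nports = 4) to `want`, and the fallback reserve grows by one nchan block, from 2 * nchan = 8 to 3 * nchan = 12, so each ULD class (offload, RDMA, and now RDMA CIQ) keeps at least one vector per channel when MSI-X allocation comes up short.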
@@ -6076,7 +6242,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
}
pci_set_drvdata(pdev, adapter);
@@ -6227,6 +6393,7 @@ static void remove_one(struct pci_dev *pdev)
adapter->flags &= ~DEV_ENABLED;
}
pci_release_regions(pdev);
+ synchronize_rcu();
kfree(adapter);
} else
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..55e9daf7f9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
const struct cxgb4_virt_res *vr; /* assorted HW resources */
const unsigned short *mtus; /* MTU table */
const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
+ const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
unsigned short nrxq; /* # of Rx queues */
unsigned short ntxq; /* # of Tx queues */
+ unsigned short nciq; /* # of concentrator IQs */
unsigned char nchan:4; /* # of channels */
unsigned char nports:4; /* # of ports */
unsigned char wr_cred; /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+ unsigned short header_size,
+ unsigned short data_size_max,
+ unsigned short data_size_align,
+ unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
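A hypothetical ULD-side call of the new export; `peer_mss` and the 40-byte header size are illustrative, not from this patch:

        /* Hypothetical ULD helper; lldi and peer_mss come from the ULD's
         * own state.
         */
        static unsigned int uld_pick_mtu(const struct cxgb4_lld_info *lldi,
                                         unsigned int peer_mss)
        {
                unsigned int mtu_idx;

                /* largest MTU whose payload, after a 40-byte IP+TCP header,
                 * is a multiple of 8 and no larger than the peer's MSS */
                return cxgb4_best_aligned_mtu(lldi->mtus, 40, peer_mss, 8,
                                              &mtu_idx);
        }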
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e249528c8e60..dd4355d248e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
- (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
iq->size--; /* subtract status entry */
- iq->adap = adap;
iq->netdev = dev;
iq->handler = hnd;
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
+ for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index bba67681aeaa..931478e7bd28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3962,6 +3962,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->lport = j;
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ adap->port[i]->dev_port = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+ SGE_MAX_IQ_SIZE = 65520,
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c710789..973eb11aa98a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
+#define RCV_BUFSIZ_MASK 0x3FFU
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
__be64 opt0;
};
+struct cpl_t5_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be64 opt0;
+ __be32 iss;
+ __be32 rsvd;
+};
+
struct cpl_act_open_req {
WR_HDR;
union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 52859288de7b..ff1cdd1788b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4vf_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+ netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
/*
* Initialize the hardware/software state for the port.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9d88c1d50b49..bdfa80ca5e31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt = (void *)rsp;
- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && !pkt->err_vec &&
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0202ad..14f465f239d6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_AIC_LARGE_PKT_DIFF 3
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
void *devid;
};
+/* Store only the lower range. Higher range is given by fw. */
+struct enic_intr_mod_range {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+};
+
+struct enic_intr_mod_table {
+ u32 rx_rate;
+ u32 range_percent;
+};
+
+#define ENIC_MAX_LINK_SPEEDS 3
+#define ENIC_LINK_SPEED_10G 10000
+#define ENIC_LINK_SPEED_4G 4000
+#define ENIC_LINK_40G_INDEX 2
+#define ENIC_LINK_10G_INDEX 1
+#define ENIC_LINK_4G_INDEX 0
+#define ENIC_RX_COALESCE_RANGE_END 125
+#define ENIC_AIC_TS_BREAK 100
+
+struct enic_rx_coal {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+ u32 range_end;
+ u32 use_adaptive_rx_coalesce;
+};
+
/* priv_flags */
#define ENIC_SRIOV_ENABLED (1 << 0)
@@ -85,13 +114,12 @@ struct enic {
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
- u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int flags;
unsigned int priv_flags;
unsigned int mc_count;
unsigned int uc_count;
u32 port_mtu;
+ struct enic_rx_coal rx_coalesce_setting;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 4b6e5695b263..3e27df522847 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -88,7 +88,7 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
return err;
}
-int enic_dev_add_addr(struct enic *enic, u8 *addr)
+int enic_dev_add_addr(struct enic *enic, const u8 *addr)
{
int err;
@@ -99,7 +99,7 @@ int enic_dev_add_addr(struct enic *enic, u8 *addr)
return err;
}
-int enic_dev_del_addr(struct enic *enic, u8 *addr)
+int enic_dev_del_addr(struct enic *enic, const u8 *addr)
{
int err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 129b14a4efb0..36ea1ab25f6a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -45,8 +45,8 @@ int enic_dev_add_station_addr(struct enic *enic);
int enic_dev_del_station_addr(struct enic *enic);
int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int enic_dev_add_addr(struct enic *enic, u8 *addr);
-int enic_dev_del_addr(struct enic *enic, u8 *addr);
+int enic_dev_add_addr(struct enic *enic, const u8 *addr);
+int enic_dev_del_addr(struct enic *enic, const u8 *addr);
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 47e3562f4866..2e50b5489d20 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+{
+ int i;
+ int intr;
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ }
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -93,8 +104,8 @@ static int enic_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+ if (rxcoal->use_adaptive_rx_coalesce)
+ ecmd->use_adaptive_rx_coalesce = 1;
+ ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
+ ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
return 0;
}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_coalesce_usecs_high;
+ u32 coalesce_usecs_max;
unsigned int i, intr;
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
+
+ rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+ coalesce_usecs_max);
+ rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+ coalesce_usecs_max);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
intr = enic_legacy_io_intr();
vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
case VNIC_DEV_INTR_MODE_MSI:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
vnic_intr_coalescing_timer_set(&enic->intr[0],
tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
tx_coalesce_usecs);
}
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
+ if (rxcoal->use_adaptive_rx_coalesce) {
+ if (!ecmd->use_adaptive_rx_coalesce) {
+ rxcoal->use_adaptive_rx_coalesce = 0;
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ }
+ } else {
+ if (ecmd->use_adaptive_rx_coalesce)
+ rxcoal->use_adaptive_rx_coalesce = 1;
+ else
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
}
+ if (ecmd->rx_coalesce_usecs_high) {
+ if (rx_coalesce_usecs_high <
+ (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
+ }
break;
default:
break;
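As a usage sketch, `ethtool -C eth0 adaptive-rx on rx-usecs-low 10 rx-usecs-high 80` (interface name illustrative) lands in this path: the driver stores small_pkt_range_start = 10 and range_end = 80, and derives large_pkt_range_start = 10 + ENIC_AIC_LARGE_PKT_DIFF = 13; a high value closer than ENIC_AIC_LARGE_PKT_DIFF to the low one is rejected with -EINVAL.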
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
void enic_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+ netdev->ethtool_ops = &enic_ethtool_ops;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718ce806..f32f828b7f3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
+#include <linux/ktime.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+#define ENIC_MAX_COALESCE_TIMERS 10
+/* Interrupt moderation table, which will be used to decide the
+ * coalescing timer values
+ * {rx_rate in Mbps, mapping percentage of the range}
+ */
+struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+ {4000, 0},
+ {4400, 10},
+ {5060, 20},
+ {5230, 30},
+ {5540, 40},
+ {5820, 50},
+ {6120, 60},
+ {6435, 70},
+ {6745, 80},
+ {7000, 90},
+ {0xFFFFFFFF, 100}
+};
+
+/* This table helps the driver pick different ranges for the rx
+ * coalescing timer depending on the link speed.
+ */
+struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+ {0, 0}, /* 0 - 4 Gbps */
+ {0, 3}, /* 4 - 10 Gbps */
+ {3, 6}, /* 10 - 40 Gbps */
+};
+
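Worked example of the two tables together: on a 40 Gb/s link the large-packet range starts at 6 (mod_range index 2) and ends at ENIC_RX_COALESCE_RANGE_END = 125; a measured rate of ~5500 Mb/s stops the lookup at the {5540, 40} row, so the target timer becomes 6 + (125 - 6) * 40 / 100 = 53 us (integer math) before damping is applied.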
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -586,8 +616,71 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
return net_stats;
}
+static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
+ unsigned int mc_count = netdev_mc_count(netdev);
+
+ netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
+ ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
+
+ return -ENOSPC;
+ }
+
+ enic_dev_add_addr(enic, mc_addr);
+ enic->mc_count++;
+
+ return 0;
+}
+
+static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ enic_dev_del_addr(enic, mc_addr);
+ enic->mc_count--;
+
+ return 0;
+}
+
+static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
+ unsigned int uc_count = netdev_uc_count(netdev);
+
+ netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
+ ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+
+ return -ENOSPC;
+ }
+
+ enic_dev_add_addr(enic, uc_addr);
+ enic->uc_count++;
+
+ return 0;
+}
+
+static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ enic_dev_del_addr(enic, uc_addr);
+ enic->uc_count--;
+
+ return 0;
+}
+
void enic_reset_addr_lists(struct enic *enic)
{
+ struct net_device *netdev = enic->netdev;
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+
enic->mc_count = 0;
enic->uc_count = 0;
enic->flags = 0;
@@ -654,112 +747,6 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
-static void enic_update_multicast_addr_list(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct netdev_hw_addr *ha;
- unsigned int mc_count = netdev_mc_count(netdev);
- u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int i, j;
-
- if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
- netdev_warn(netdev, "Registering only %d out of %d "
- "multicast addresses\n",
- ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
- mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
- }
-
- /* Is there an easier way? Trying to minimize to
- * calls to add/del multicast addrs. We keep the
- * addrs from the last call in enic->mc_addr and
- * look for changes to add/del.
- */
-
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
- }
-
- for (i = 0; i < enic->mc_count; i++) {
- for (j = 0; j < mc_count; j++)
- if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
- break;
- if (j == mc_count)
- enic_dev_del_addr(enic, enic->mc_addr[i]);
- }
-
- for (i = 0; i < mc_count; i++) {
- for (j = 0; j < enic->mc_count; j++)
- if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
- break;
- if (j == enic->mc_count)
- enic_dev_add_addr(enic, mc_addr[i]);
- }
-
- /* Save the list to compare against next time
- */
-
- for (i = 0; i < mc_count; i++)
- memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
-
- enic->mc_count = mc_count;
-}
-
-static void enic_update_unicast_addr_list(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct netdev_hw_addr *ha;
- unsigned int uc_count = netdev_uc_count(netdev);
- u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int i, j;
-
- if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
- netdev_warn(netdev, "Registering only %d out of %d "
- "unicast addresses\n",
- ENIC_UNICAST_PERFECT_FILTERS, uc_count);
- uc_count = ENIC_UNICAST_PERFECT_FILTERS;
- }
-
- /* Is there an easier way? Trying to minimize to
- * calls to add/del unicast addrs. We keep the
- * addrs from the last call in enic->uc_addr and
- * look for changes to add/del.
- */
-
- i = 0;
- netdev_for_each_uc_addr(ha, netdev) {
- if (i == uc_count)
- break;
- memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
- }
-
- for (i = 0; i < enic->uc_count; i++) {
- for (j = 0; j < uc_count; j++)
- if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
- break;
- if (j == uc_count)
- enic_dev_del_addr(enic, enic->uc_addr[i]);
- }
-
- for (i = 0; i < uc_count; i++) {
- for (j = 0; j < enic->uc_count; j++)
- if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
- break;
- if (j == enic->uc_count)
- enic_dev_add_addr(enic, uc_addr[i]);
- }
-
- /* Save the list to compare against next time
- */
-
- for (i = 0; i < uc_count; i++)
- memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
-
- enic->uc_count = uc_count;
-}
-
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
@@ -782,9 +769,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
if (!promisc) {
- enic_update_unicast_addr_list(enic);
+ __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
if (!allmulti)
- enic_update_multicast_addr_list(enic);
+ __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
}
}
@@ -979,6 +966,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -986,6 +982,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct enic *enic = vnic_dev_priv(rq->vdev);
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1053,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
napi_gro_receive(&enic->napi[q_number], skb);
else
netif_receive_skb(skb);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
} else {
/* Buffer overflow
@@ -1134,6 +1134,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
return rq_work_done;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
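Unit check for the rate computation above: bytes accumulated over a window of delta microseconds convert to megabits per second as bytes * 8 / delta, because the 10^6 factors from microseconds-to-seconds and from bit/s-to-Mb/s cancel; for example, 1,250,000 bytes over 2000 us gives 10,000,000 / 2000 = 5000 Mb/s.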
+
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1171,6 +1229,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Call the function which refreshes
+ * the intr coalescing timer value based on
+ * the traffic. This is supported only in
+ * the case of MSI-X mode
+ */
+ enic_calc_int_moderation(enic, &enic->rq[rq]);
if (work_done < work_to_do) {
@@ -1179,6 +1244,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
@@ -1314,6 +1381,42 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* If intr mode is not MSIX, do not do adaptive coalescing */
+ if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
+ netdev_info(enic->netdev, "INTR mode is not MSI-X; not initializing adaptive coalescing");
+ return;
+ }
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (ENIC_LINK_SPEED_10G < speed)
+ index = ENIC_LINK_40G_INDEX;
+ else if (ENIC_LINK_SPEED_4G < speed)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
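With the constants above, a reported speed of exactly 10000 (10 Gb/s) falls into the 10G bucket (range starts {0, 3}), anything faster into the 40G bucket ({3, 6}), and 4 Gb/s or below into the 4G bucket ({0, 0}); range_end stays at ENIC_RX_COALESCE_RANGE_END = 125 until overridden through ethtool as shown earlier.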
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -2231,6 +2334,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic->notify_timer.function = enic_notify_timer;
enic->notify_timer.data = (unsigned long)enic;
+ enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
@@ -2250,6 +2354,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ /* The rx coalesce time is already initialized; it is used
+ * if adaptive coalescing is turned off
+ */
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe803..4e6aa65857f7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
u32 pad10;
};
+struct vnic_rx_bytes_counter {
+ unsigned int small_pkt_bytes_cnt;
+ unsigned int large_pkt_bytes_cnt;
+};
+
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
unsigned int to_clean;
unsigned int last_color;
unsigned int interrupt_offset;
+ struct vnic_rx_bytes_counter pkt_size_counter;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
+ ktime_t prev_ts;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 69dd92598b7e..e86a45cb9e68 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -657,7 +657,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
return err;
}
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -674,7 +674,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
return err;
}
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index e670029862a1..1f3b301f8225 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -95,8 +95,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr);
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
int vnic_dev_notify_unset(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8c4b93be333b..13723c96d1a2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -109,6 +109,7 @@ typedef struct board_info {
u8 imr_all;
unsigned int flags;
+ unsigned int in_timeout:1;
unsigned int in_suspend:1;
unsigned int wake_supported:1;
@@ -187,13 +188,13 @@ dm9000_reset(board_info_t *db)
* The essential point is that we have to do a double reset, and the
* instruction is to set LBK into MAC internal loopback mode.
*/
- iow(db, DM9000_NCR, 0x03);
+ iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
udelay(100); /* Application note says at least 20 us */
if (ior(db, DM9000_NCR) & 1)
dev_err(db->dev, "dm9000 did not respond to first reset\n");
iow(db, DM9000_NCR, 0);
- iow(db, DM9000_NCR, 0x03);
+ iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
udelay(100);
if (ior(db, DM9000_NCR) & 1)
dev_err(db->dev, "dm9000 did not respond to second reset\n");
@@ -273,7 +274,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
- if (db->in_suspend)
+ if (db->in_suspend || db->in_timeout)
mdelay(ms);
else
msleep(ms);
@@ -334,7 +335,8 @@ dm9000_phy_write(struct net_device *dev,
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
- mutex_lock(&db->addr_lock);
+ if (!db->in_timeout)
+ mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
@@ -365,7 +367,8 @@ dm9000_phy_write(struct net_device *dev,
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
- mutex_unlock(&db->addr_lock);
+ if (!db->in_timeout)
+ mutex_unlock(&db->addr_lock);
}
/* dm9000_set_io
@@ -882,6 +885,18 @@ dm9000_hash_table(struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
}
+static void
+dm9000_mask_interrupts(board_info_t *db)
+{
+ iow(db, DM9000_IMR, IMR_PAR);
+}
+
+static void
+dm9000_unmask_interrupts(board_info_t *db)
+{
+ iow(db, DM9000_IMR, db->imr_all);
+}
+
/*
* Initialize dm9000 board
*/
@@ -894,6 +909,9 @@ dm9000_init_dm9000(struct net_device *dev)
dm9000_dbg(db, 1, "entering %s\n", __func__);
+ dm9000_reset(db);
+ dm9000_mask_interrupts(db);
+
/* I/O mode */
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
@@ -941,9 +959,6 @@ dm9000_init_dm9000(struct net_device *dev)
db->imr_all = imr;
- /* Enable TX/RX interrupt mask */
- iow(db, DM9000_IMR, imr);
-
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
@@ -959,17 +974,19 @@ static void dm9000_timeout(struct net_device *dev)
/* Save previous register address */
spin_lock_irqsave(&db->lock, flags);
+ db->in_timeout = 1;
reg_save = readb(db->io_addr);
netif_stop_queue(dev);
- dm9000_reset(db);
dm9000_init_dm9000(dev);
+ dm9000_unmask_interrupts(db);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
+ db->in_timeout = 0;
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1093,7 +1110,6 @@ dm9000_rx(struct net_device *dev)
if (rxbyte & DM9000_PKT_ERR) {
dev_warn(db->dev, "status check fail: %d\n", rxbyte);
iow(db, DM9000_RCR, 0x00); /* Stop Device */
- iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
return;
}
@@ -1193,9 +1209,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
/* Save previous register address */
reg_save = readb(db->io_addr);
- /* Disable all interrupts */
- iow(db, DM9000_IMR, IMR_PAR);
-
+ dm9000_mask_interrupts(db);
/* Got DM9000 interrupt status */
int_status = ior(db, DM9000_ISR); /* Got ISR */
iow(db, DM9000_ISR, int_status); /* Clear ISR status */
@@ -1218,9 +1232,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
}
}
- /* Re-enable interrupt mask */
- iow(db, DM9000_IMR, db->imr_all);
-
+ dm9000_unmask_interrupts(db);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
@@ -1292,6 +1304,9 @@ dm9000_open(struct net_device *dev)
* may work, and tell the user that this is a problem */
if (irqflags == IRQF_TRIGGER_NONE)
+ irqflags = irq_get_trigger_type(dev->irq);
+
+ if (irqflags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irqflags |= IRQF_SHARED;
@@ -1301,11 +1316,14 @@ dm9000_open(struct net_device *dev)
mdelay(1); /* delay needs by DM9000B */
/* Initialize DM9000 board */
- dm9000_reset(db);
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* Now that we have an interrupt handler hooked up, we can unmask
+ * our interrupts
+ */
+ dm9000_unmask_interrupts(db);
/* Init driver variable */
db->dbug_cnt = 0;
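The open path thus becomes: reset and initialize with all interrupt sources masked, hook up the handler via request_irq(), and only then unmask. This closes the window in which a shared-line interrupt could reach a half-initialized chip, and is also why dm9000_init_dm9000() now performs the reset and masking itself.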
@@ -1313,7 +1331,8 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
- dm9000_schedule_poll(db);
+ /* Poll initial link status */
+ schedule_delayed_work(&db->phy_poll, 1);
return 0;
}
@@ -1326,7 +1345,7 @@ dm9000_shutdown(struct net_device *dev)
/* RESET device */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
- iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
+ dm9000_mask_interrupts(db);
iow(db, DM9000_RCR, 0x00); /* Disable RX */
}
@@ -1547,12 +1566,7 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
- /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
- * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
- * while probe stage.
- */
-
- iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+ dm9000_reset(db);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
@@ -1695,8 +1709,8 @@ dm9000_drv_resume(struct device *dev)
/* reset if we were not in wake mode to ensure if
* the device was powered off it is in a known state */
if (!db->wake_state) {
- dm9000_reset(db);
dm9000_init_dm9000(ndev);
+ dm9000_unmask_interrupts(db);
}
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 768379b8aee9..523d9dde50a2 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -158,7 +158,7 @@ void comet_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct tulip_private *tp = netdev_priv(dev);
- int next_tick = 60*HZ;
+ int next_tick = 2*HZ;
if (tulip_debug > 1)
netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1642de78aac8..861660841ce2 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_TULIP_NAPI
netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
if (register_netdev(dev))
goto err_out_free_ring;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa801a6af7b9..80afec335a11 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -962,8 +962,8 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
}
if(db->link_failed)
{
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
if (db->media_mode & ULI526X_AUTO)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 4fb756d219f7..1274b6fdac8a 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
}
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
#if 0
dev->features = NETIF_F_IP_CSUM;
#endif
@@ -1185,8 +1185,8 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, np->speed);
cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
if ( np->an_enable)
cmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d9e5ca0d48c1..433c1e185442 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 4884205e56ee..056b44b93477 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -134,17 +134,17 @@ struct ec_bhf_priv {
struct pci_dev *dev;
- void * __iomem io;
- void * __iomem dma_io;
+ void __iomem *io;
+ void __iomem *dma_io;
struct hrtimer hrtimer;
int tx_dma_chan;
int rx_dma_chan;
- void * __iomem ec_io;
- void * __iomem fifo_io;
- void * __iomem mii_io;
- void * __iomem mac_io;
+ void __iomem *ec_io;
+ void __iomem *fifo_io;
+ void __iomem *mii_io;
+ void __iomem *mac_io;
struct bhf_dma rx_buf;
struct rx_desc *rx_descs;
@@ -297,7 +297,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
struct device *dev = PRIV_TO_DEV(priv);
unsigned block_count, i;
- void * __iomem ec_info;
+ void __iomem *ec_info;
dev_dbg(dev, "Info block:\n");
dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
@@ -569,8 +569,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct net_device *net_dev;
struct ec_bhf_priv *priv;
- void * __iomem dma_io;
- void * __iomem io;
+ void __iomem *dma_io;
+ void __iomem *io;
int err = 0;
err = pci_enable_device(dev);
@@ -615,7 +615,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
- if (net_dev == 0) {
+ if (net_dev == NULL) {
err = -ENOMEM;
goto err_unmap_dma_io;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..c2f5d2d3b932 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define RSS_INDIR_TABLE_LEN 128
+#define RSS_HASH_KEY_LEN 40
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -371,6 +374,7 @@ enum vf_state {
#define BE_FLAGS_LINK_STATUS_INIT 1
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_FLAGS_VLAN_PROMISC (1 << 4)
+#define BE_FLAGS_MCAST_PROMISC (1 << 5)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
@@ -409,6 +413,13 @@ struct be_resources {
u32 if_cap_flags;
};
+struct rss_info {
+ u64 rss_flags;
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+ u8 rss_queue[RSS_INDIR_TABLE_LEN];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -445,7 +456,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
- u8 vlan_tag[VLAN_N_VID];
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +518,7 @@ struct be_adapter {
u32 msg_enable;
int be_get_temp_freq;
u8 pf_number;
- u64 rss_flags;
+ struct rss_info rss_info;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -546,9 +557,7 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
/* Is BE in QNQ multi-channel mode */
-#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
- adapter->mc_type == vNIC1 || \
- adapter->mc_type == UFP)
+#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..f4ea3490f446 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
}
};
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
- u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
return (void *)addr;
}
-static int be_mcc_compl_process(struct be_adapter *adapter,
- struct be_mcc_compl *compl)
+static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
- u16 compl_status, extd_status;
- struct be_cmd_resp_hdr *resp_hdr;
- u8 opcode = 0, subsystem = 0;
-
- /* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
- be_dws_le_to_cpu(compl, 4);
-
- compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
+ if (base_status == MCC_STATUS_NOT_SUPPORTED ||
+ base_status == MCC_STATUS_ILLEGAL_REQUEST ||
+ addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
+ (base_status == MCC_STATUS_ILLEGAL_FIELD ||
+ addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
+ return true;
+ else
+ return false;
+}
- resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
+ * loop (has not issued be_mcc_notify_wait())
+ */
+static void be_async_cmd_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl,
+ struct be_cmd_resp_hdr *resp_hdr)
+{
+ enum mcc_base_status base_status = base_status(compl->status);
+ u8 opcode = 0, subsystem = 0;
if (resp_hdr) {
opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
complete(&adapter->et_cmd_compl);
- return 0;
+ return;
}
- if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
- (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
- (subsystem == CMD_SUBSYSTEM_COMMON)) {
- adapter->flash_status = compl_status;
+ if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
+ opcode == OPCODE_COMMON_WRITE_OBJECT) &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ adapter->flash_status = compl->status;
complete(&adapter->et_cmd_compl);
+ return;
}
- if (compl_status == MCC_STATUS_SUCCESS) {
- if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
- (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
- (subsystem == CMD_SUBSYSTEM_ETH)) {
- be_parse_stats(adapter);
- adapter->stats_cmd_sent = false;
- }
- if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
- subsystem == CMD_SUBSYSTEM_COMMON) {
+ if ((opcode == OPCODE_ETH_GET_STATISTICS ||
+ opcode == OPCODE_ETH_GET_PPORT_STATS) &&
+ subsystem == CMD_SUBSYSTEM_ETH &&
+ base_status == MCC_STATUS_SUCCESS) {
+ be_parse_stats(adapter);
+ adapter->stats_cmd_sent = false;
+ return;
+ }
+
+ if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ if (base_status == MCC_STATUS_SUCCESS) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
- (void *)resp_hdr;
+ (void *)resp_hdr;
adapter->drv_stats.be_on_die_temperature =
- resp->on_die_temperature;
- }
- } else {
- if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+ resp->on_die_temperature;
+ } else {
adapter->be_get_temp_freq = 0;
+ }
+ return;
+ }
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ enum mcc_base_status base_status;
+ enum mcc_addl_status addl_status;
+ struct be_cmd_resp_hdr *resp_hdr;
+ u8 opcode = 0, subsystem = 0;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ base_status = base_status(compl->status);
+ addl_status = addl_status(compl->status);
+
+ resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+ if (resp_hdr) {
+ opcode = resp_hdr->opcode;
+ subsystem = resp_hdr->subsystem;
+ }
+
+ be_async_cmd_process(adapter, compl, resp_hdr);
- if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
- compl_status == MCC_STATUS_ILLEGAL_REQUEST)
- goto done;
+ if (base_status != MCC_STATUS_SUCCESS &&
+ !be_skip_err_log(opcode, base_status, addl_status)) {
- if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+ if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
dev_warn(&adapter->pdev->dev,
"VF is not privileged to issue opcode %d-%d\n",
opcode, subsystem);
} else {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
dev_err(&adapter->pdev->dev,
"opcode %d-%d failed:status %d-%d\n",
- opcode, subsystem, compl_status, extd_status);
-
- if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
- return extd_status;
+ opcode, subsystem, base_status, addl_status);
}
}
-done:
- return compl_status;
+ return compl->status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
- struct be_async_event_link_state *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_link_state *evt =
+ (struct be_async_event_link_state *)compl;
+
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
- struct be_async_event_grp5_cos_priority *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_cos_priority *evt =
+ (struct be_async_event_grp5_cos_priority *)compl;
+
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
- struct be_async_event_grp5_qos_link_speed *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_qos_link_speed *evt =
+ (struct be_async_event_grp5_qos_link_speed *)compl;
+
if (adapter->phy.link_speed >= 0 &&
evt->physical_port == adapter->port_num)
adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
- struct be_async_event_grp5_pvid_state *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_pvid_state *evt =
+ (struct be_async_event_grp5_pvid_state *)compl;
+
if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *evt)
+ struct be_mcc_compl *compl)
{
- u8 event_type = 0;
-
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
+ u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_EVENT_COS_PRIORITY:
- be_async_grp5_cos_priority_process(adapter,
- (struct be_async_event_grp5_cos_priority *)evt);
- break;
+ be_async_grp5_cos_priority_process(adapter, compl);
+ break;
case ASYNC_EVENT_QOS_SPEED:
- be_async_grp5_qos_speed_process(adapter,
- (struct be_async_event_grp5_qos_link_speed *)evt);
- break;
+ be_async_grp5_qos_speed_process(adapter, compl);
+ break;
case ASYNC_EVENT_PVID_STATE:
- be_async_grp5_pvid_state_process(adapter,
- (struct be_async_event_grp5_pvid_state *)evt);
- break;
+ be_async_grp5_pvid_state_process(adapter, compl);
+ break;
default:
dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
static void be_async_dbg_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *cmp)
+ struct be_mcc_compl *cmp)
{
u8 event_type = 0;
struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
+ event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
}
}
-static inline bool is_link_state_evt(u32 trailer)
+static inline bool is_link_state_evt(u32 flags)
{
- return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE;
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE;
}
-static inline bool is_grp5_evt(u32 trailer)
+static inline bool is_grp5_evt(u32 flags)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_GRP_5);
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_GRP_5;
}
-static inline bool is_dbg_evt(u32 trailer)
+static inline bool is_dbg_evt(u32 flags)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_QNQ);
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_QNQ;
+}
+
+static void be_mcc_event_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter, compl);
+ else if (is_grp5_evt(compl->flags))
+ be_async_grp5_evt_process(adapter, compl);
+ else if (is_dbg_evt(compl->flags))
+ be_async_dbg_evt_process(adapter, compl);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock(&adapter->mcc_cq_lock);
+
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(compl->flags))
- be_async_link_state_process(adapter,
- (struct be_async_event_link_state *) compl);
- else if (is_grp5_evt(compl->flags))
- be_async_grp5_evt_process(adapter,
- compl->flags, compl);
- else if (is_dbg_evt(compl->flags))
- be_async_dbg_evt_process(adapter,
- compl->flags, compl);
+ be_mcc_event_process(adapter, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(adapter, compl);
- atomic_dec(&mcc_obj->q.used);
+ status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
}
be_mcc_compl_use(compl);
num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
if (status == -EIO)
goto out;
- status = resp->status;
+ status = (resp->base_status |
+ ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
+ CQE_ADDL_STATUS_SHIFT));
out:
return status;
}
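The combined value keeps both fields in one int, mirroring be_mcc_compl_process() returning compl->status above. A decoding sketch, assuming the 16-bit base / shifted additional-status split implied by these macros (the authoritative widths are the CQE_*STATUS* definitions in be_cmds.h):

        /* Sketch only; field widths are an assumption here. */
        static inline void sketch_decode_status(int status, u16 *base, u16 *addl)
        {
                *base = status & 0xFFFF;
                *addl = (status >> 16) & 0xFFFF;
        }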
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
- sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
- dev_info(dev, "Waiting for POST, %ds elapsed\n",
- timeout);
+ dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
- unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
wrb->tag0 = addr & 0xFFFFFFFF;
wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len,
- struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+ u8 subsystem, u8 opcode, int cmd_len,
+ struct be_mcc_wrb *wrb,
+ struct be_dma_mem *mem)
{
struct be_sge *sge;
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
- struct be_dma_mem *mem)
+ struct be_dma_mem *mem)
{
int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
u64 dma = (u64)mem->dma;
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+ NULL);
/* Support for EQ_CREATEv2 available only SH-R onwards */
if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+ NULL);
req->type = MAC_ADDRESS_TYPE_NETWORK;
if (permanent) {
req->permanent = 1;
@@ -940,7 +976,7 @@ err:
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
- struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
- coalesce_wm);
+ coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
- ctxt, no_delay);
+ ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
- no_delay);
+ no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
- ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
}
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
} else {
req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
}
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
}
int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq, struct be_queue_info *cq)
{
int status;
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u32 if_id, u32 rss, u8 *rss_id)
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
req->cq_id = cpu_to_le16(cq_id);
req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
* Uses Mbox
*/
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int queue_type)
+ int queue_type)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
}
be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
- NULL);
+ NULL);
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+ sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+ sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
hdr = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+ nonemb_cmd);
/* version 1 of the cmd is supported by all chips except BE2 */
if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
- nonemb_cmd);
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+ wrb, nonemb_cmd);
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req), wrb, NULL);
/* version 1 of the cmd is supported by all chips except BE2 */
if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
- wrb, NULL);
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+ sizeof(*req), wrb, NULL);
be_mcc_notify(adapter);
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+ NULL);
req->fat_operation = cpu_to_le32(QUERY_FAT);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
if (!get_fat_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
- &get_fat_cmd);
+ OPCODE_COMMON_MANAGE_FAT, payload_len,
+ wrb, &get_fat_cmd);
req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!status) {
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
memcpy(buf + offset,
- resp->data_buffer,
- le32_to_cpu(resp->read_log_length));
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
} else {
dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
}
err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va,
- get_fat_cmd.dma);
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash)
+ char *fw_on_flash)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+ NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+ NULL);
req->num_eq = cpu_to_le32(num);
for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous)
+ u32 num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+ wrb, NULL);
req->interface_id = if_id;
- req->promiscuous = promiscuous;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req->num_vlan = num;
- if (!promiscuous) {
- memcpy(req->normal_vlan, vtag_array,
- req->num_vlan * sizeof(vtag_array[0]));
- }
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
memset(req, 0, sizeof(*req));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
- wrb, mem);
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+ wrb, mem);
req->if_id = cpu_to_le32(adapter->if_handle);
if (flags & IFF_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
if (value == ON)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
- req->if_flags_mask) {
+ req->if_flags_mask) {
dev_warn(&adapter->pdev->dev,
"Cannot set rx filter flags 0x%x\n",
req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ sizeof(*req), wrb, NULL);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+ NULL);
status = be_mbox_notify_wait(adapter);
@@ -2020,47 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size)
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
- 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
- 0x3ea83c02, 0x4a110304};
int status;
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(rss_hash_opts);
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- if (lancer_chip(adapter) || skyhawk_chip(adapter))
+ if (!BEx_chip(adapter))
req->hdr.version = 1;
memcpy(req->cpu_table, rsstable, table_size);
- memcpy(req->hash, myhash, sizeof(myhash));
+ memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
be_dws_cpu_to_le(req->hash, sizeof(req->hash));
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
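Two changes land here: the command moves off the mailbox (mutex) onto the MCC queue (spinlock, usable in bh context), and the Toeplitz hash key becomes caller-supplied instead of the hard-coded myhash table. A sketch of a caller under the new signature, mirroring the updated call in be_ethtool.c below (RSS_HASH_KEY_LEN is assumed to be 40):

	/* Sketch only: seed a random hash key and program RSS */
	u8 hkey[RSS_HASH_KEY_LEN];

	get_random_bytes(hkey, RSS_HASH_KEY_LEN);
	status = be_cmd_rss_config(adapter, rsstable, rss_flags,
				   RSS_INDIR_TABLE_LEN, hkey);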
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
- u8 bcn, u8 sts, u8 state)
+ u8 bcn, u8 sts, u8 state)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+ sizeof(*req), wrb, NULL);
req->port_num = port_num;
req->beacon_state = state;
@@ -2107,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+ wrb, NULL);
req->port_num = port_num;
@@ -2146,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_OBJECT,
- sizeof(struct lancer_cmd_req_write_object), wrb,
- NULL);
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object), wrb,
+ NULL);
ctxt = &req->context;
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- write_length, ctxt, data_size);
+ write_length, ctxt, data_size);
if (data_size == 0)
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 1);
+ eof, ctxt, 1);
else
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 0);
+ eof, ctxt, 0);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
- sizeof(struct lancer_cmd_req_write_object))
- & 0xFFFFFFFF);
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
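The address split above is the usual 64-bit-to-two-dwords idiom; an equivalent formulation (sketch only, using the kernel's lower_32_bits()/upper_32_bits() helpers):

	/* The object payload starts right after the embedded request */
	u64 dma = cmd->dma + sizeof(struct lancer_cmd_req_write_object);

	req->addr_low  = cpu_to_le32(lower_32_bits(dma));
	req->addr_high = cpu_to_le32(upper_32_bits(dma));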
@@ -2197,8 +2244,8 @@ err_unlock:
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status)
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_OBJECT,
- sizeof(struct lancer_cmd_req_read_object), wrb,
- NULL);
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
req->desired_read_len = cpu_to_le32(data_size);
req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2291,7 @@ err_unlock:
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2261,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+ cmd);
req->params.op_type = cpu_to_le32(flash_type);
req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2284,7 +2332,7 @@ err_unlock:
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset)
+ u16 optype, int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_read_flash_crc *req;
@@ -2303,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
wrb, NULL);
- req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(optype);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
req->params.offset = cpu_to_le32(offset);
req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2318,7 +2366,7 @@ err:
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+ wrb, nonemb_cmd);
memcpy(req->magic_mac, mac, ETH_ALEN);
status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
- NULL);
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+ wrb, NULL);
req->src_port = port_num;
req->dest_port = port_num;
@@ -2378,7 +2426,8 @@ err:
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+ NULL);
req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2471,7 @@ err:
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd)
+ u32 byte_cnt, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+ cmd);
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2516,7 @@ err:
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+ nonemb_cmd);
status = be_mcc_notify_wait(adapter);
@@ -2510,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2521,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+ wrb, &cmd);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -2544,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2568,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ &attribs_cmd.dma);
if (!attribs_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -2613,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
req = attribs_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
- &attribs_cmd);
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+ wrb, &attribs_cmd);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2649,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+ sizeof(*req), wrb, NULL);
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure during GET_MAC_LIST\n");
+ "Memory allocation failure during GET_MAC_LIST\n");
return -ENOMEM;
}
@@ -2831,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
/* If no active mac_id found, return first mac addr */
*pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
- ETH_ALEN);
+ ETH_ALEN);
}
out:
spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
- u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+ u8 *mac, u32 if_handle, bool active, u32 domain)
{
if (!active)
@@ -2892,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -2906,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
req->hdr.domain = domain;
req->mac_count = mac_count;
@@ -2917,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size,
- cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2963,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (!status) {
struct be_cmd_resp_get_hsw_config *resp =
embedded_payload(wrb);
- be_dws_le_to_cpu(&resp->context,
- sizeof(resp->context));
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
- pvid, &resp->context);
+ pvid, &resp->context);
if (pvid)
*pvid = le16_to_cpu(vid);
if (mode)
@@ -3062,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -3349,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3396,7 +3442,7 @@ err:
/* Uses mbox */
static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
/* Uses sync mcc */
static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le32_to_cpu(resp->desc_count);
- pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
- desc_count);
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
@@ -3548,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
nic->cq_count = 0xFFFF;
nic->toe_conn_count = 0xFFFF;
nic->eq_count = 0xFFFF;
+ nic->iface_count = 0xFFFF;
nic->link_param = 0xFF;
+ nic->channel_id_param = cpu_to_le16(0xF000);
nic->acpi_params = 0xFF;
nic->wol_param = 0x0F;
- nic->bw_min = 0xFFFFFFFF;
+ nic->tunnel_iface_count = 0xFFFF;
+ nic->direct_tenant_iface_count = 0xFFFF;
nic->bw_max = 0xFFFFFFFF;
}
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+ u8 domain)
{
- if (lancer_chip(adapter)) {
- struct be_nic_res_desc nic_desc;
+ struct be_nic_res_desc nic_desc;
+ u32 bw_percent;
+ u16 version = 0;
+
+ if (BE3_chip(adapter))
+ return be_cmd_set_qos(adapter, max_rate / 10, domain);
- be_reset_nic_desc(&nic_desc);
+ be_reset_nic_desc(&nic_desc);
+ nic_desc.pf_num = adapter->pf_number;
+ nic_desc.vf_num = domain;
+ if (lancer_chip(adapter)) {
nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
(1 << NOSV_SHIFT);
- nic_desc.pf_num = adapter->pf_number;
- nic_desc.vf_num = domain;
- nic_desc.bw_max = cpu_to_le32(bps);
-
- return be_cmd_set_profile_config(adapter, &nic_desc,
- RESOURCE_DESC_SIZE_V0,
- 0, domain);
+ nic_desc.bw_max = cpu_to_le32(max_rate / 10);
} else {
- return be_cmd_set_qos(adapter, bps, domain);
+ version = 1;
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+ nic_desc.bw_max = cpu_to_le32(bw_percent);
}
+
+ return be_cmd_set_profile_config(adapter, &nic_desc,
+ nic_desc.hdr.desc_len,
+ version, domain);
}
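Worked example for the new rate math (illustrative values): capping a VF at max_rate = 2500 Mbps on a link_speed = 10000 Mbps port yields bw_percent = (2500 * 100) / 10000 = 25 on the v1 (Skyhawk) path, while BE3 still takes an absolute rate in 10 Mbps units, i.e. 2500 / 10 = 250; max_rate = 0 means no cap and is programmed as 100 percent.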
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3859,7 +3919,7 @@ err:
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
- int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
struct be_adapter *adapter = netdev_priv(netdev_handle);
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..59b3c056f329 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
/* Completion Status */
-enum {
+enum mcc_base_status {
MCC_STATUS_SUCCESS = 0,
MCC_STATUS_FAILED = 1,
MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@ enum {
MCC_STATUS_NOT_SUPPORTED = 66
};
-#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
+/* Additional status */
+enum mcc_addl_status {
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+ MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+};
+
+#define CQE_BASE_STATUS_MASK 0xFFFF
+#define CQE_BASE_STATUS_SHIFT 0 /* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK 0xFF
+#define CQE_ADDL_STATUS_SHIFT 16 /* bits 16 - 31 */
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
+#define base_status(status) \
+ ((enum mcc_base_status) \
+ (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status) \
+ ((enum mcc_addl_status) \
+ (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
+ CQE_ADDL_STATUS_MASK : 0))
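A sketch (not in the patch) of how call sites are expected to consume a combined return value through these helpers; adapter and status are stand-ins for the caller's locals:

	/* Illustration only: split a packed status for logging */
	if (base_status(status) != MCC_STATUS_SUCCESS)
		dev_err(&adapter->pdev->dev,
			"cmd failed: base 0x%x, addl 0x%x\n",
			base_status(status), addl_status(status));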
struct be_mcc_compl {
u32 status; /* dword 0 */
@@ -74,13 +87,13 @@ struct be_mcc_compl {
u32 flags; /* dword 3 */
};
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl is interpreted as follows:
+/* When the async bit of mcc_compl flags is set, the flags field
+ * is interpreted as follows:
*/
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+#define ASYNC_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_TYPE_SHIFT 16
+#define ASYNC_EVENT_TYPE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
#define ASYNC_EVENT_CODE_GRP_5 0x5
#define ASYNC_EVENT_QOS_SPEED 0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
#define ASYNC_EVENT_CODE_QNQ 0x6
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
-struct be_async_event_trailer {
- u32 code;
-};
-
enum {
LINK_DOWN = 0x0,
LINK_UP = 0x1
@@ -100,7 +109,7 @@ enum {
#define LINK_STATUS_MASK 0x1
#define LOGICAL_LINK_STATUS_MASK 0x2
-/* When the event code of an async trailer is link-state, the mcc_compl
+/* When the event code of compl->flags is link-state, the mcc_compl
* must be interpreted as follows
*/
struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
u8 port_speed;
u8 port_fault;
u8 rsvd0[7];
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
* the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
u8 rsvd[5];
u16 qos_link_speed;
u32 event_tag;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
* CoS-Priority, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
u8 valid;
u8 rsvd0;
u8 event_tag;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
* PVID state, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
u16 tag;
u32 event_tag;
u32 rsvd1;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
/* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
u16 vlan_tag;
u32 event_tag;
u8 rsvd1[4];
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
u8 opcode; /* dword 0 */
u8 subsystem; /* dword 0 */
u8 rsvd[2]; /* dword 0 */
- u8 status; /* dword 1 */
- u8 add_status; /* dword 1 */
+ u8 base_status; /* dword 1 */
+ u8 addl_status; /* dword 1 */
u8 rsvd1[2]; /* dword 1 */
u32 response_length; /* dword 2 */
u32 actual_resp_len; /* dword 3 */
@@ -1082,7 +1091,7 @@ struct be_cmd_resp_modify_eq_delay {
* based on the skew/IPL.
*/
#define RDMA_ENABLED 0x4
-#define FLEX10_MODE 0x400
+#define QNQ_MODE 0x400
#define VNIC_MODE 0x20000
#define UMC_ENABLED 0x1000000
struct be_cmd_req_query_fw_cfg {
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
struct flashrom_params params;
u8 crc[4];
u8 rsvd[4];
-};
+} __packed;
+
/**************** Lancer Firmware Flash ************/
struct amap_lancer_write_obj_context {
u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
u16 cq_count;
u16 toe_conn_count;
u16 eq_count;
- u32 rsvd5;
+ u16 vlan_id;
+ u16 iface_count;
u32 cap_flags;
u8 link_param;
- u8 rsvd6[3];
+ u8 rsvd6;
+ u16 channel_id_param;
u32 bw_min;
u32 bw_max;
u8 acpi_params;
u8 wol_param;
u16 rsvd7;
- u32 rsvd8[7];
+ u16 tunnel_iface_count;
+ u16 direct_tenant_iface_count;
+ u32 rsvd8[6];
} __packed;
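A quick size check on the reshuffled descriptor (worked arithmetic, not in the patch): u32 rsvd5 (4 bytes) becomes u16 vlan_id + u16 iface_count (4 bytes), u8 rsvd6[3] becomes u8 rsvd6 + u16 channel_id_param (3 bytes), and u32 rsvd8[7] (28 bytes) becomes u16 tunnel_iface_count + u16 direct_tenant_iface_count + u32 rsvd8[6] (28 bytes), so the __packed layout keeps its original length and the offsets of the surviving fields are unchanged.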
/************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
char *fw_on_flash);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous);
+ u32 num);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2082,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
int be_cmd_reset_function(struct be_adapter *adapter);
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
int be_process_mcc(struct be_adapter *adapter);
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
u8 status, u8 state);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
+ u16 optype, int offset);
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+ u16 link_speed, u8 domain);
void be_detect_error(struct be_adapter *adapter);
int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..e2da4d20dd3d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers
* to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_NO_LOOPBACK 0xff
static void be_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
drvinfo->eedump_len = 0;
}
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
u32 data_read = 0, eof;
u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
memset(&data_len_cmd, 0, sizeof(data_len_cmd));
/* data_offset and data_size should be 0 to get reg len */
status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
- file_name, &data_read, &eof, &addn_status);
+ file_name, &data_read, &eof,
+ &addn_status);
return data_read;
}
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
- u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
{
struct be_dma_mem read_cmd;
u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ &read_cmd.dma);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure while reading dump\n");
+ "Memory allocation failure while reading dump\n");
return -ENOMEM;
}
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
- total_read_len, file_name, &read_len,
- &eof, &addn_status);
+ total_read_len, file_name,
+ &read_len, &eof, &addn_status);
if (!status) {
memcpy(buf + total_read_len, read_cmd.va, read_len);
total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
}
}
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ read_cmd.dma);
return status;
}
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
- LANCER_FW_DUMP_FILE);
+ LANCER_FW_DUMP_FILE);
else
be_cmd_get_reg_len(adapter, &log_size);
}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
memset(buf, 0, regs->len);
if (lancer_chip(adapter))
lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
- regs->len, buf);
+ regs->len, buf);
else
be_cmd_get_regs(adapter, regs->len, buf);
}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
return 0;
}
-static void
-be_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
}
}
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
adapter->rx_fc = ecmd->rx_pause;
status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
+ adapter->tx_fc, adapter->rx_fc);
if (status)
dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
return status;
}
-static int
-be_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
return status;
}
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
- 4096, &ddrdma_cmd);
+ 4096, &ddrdma_cmd);
if (ret != 0)
goto err;
}
@@ -773,20 +766,17 @@ err:
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
- u64 *status)
+ u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- loopback_type, 1);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
- loopback_type, 1500,
- 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- BE_NO_LOOPBACK, 1);
+ loopback_type, 1500, 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
return *status;
}
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0)
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
}
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
return be_load_fw(adapter, efl->data);
}
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_PF_FILE);
+ LANCER_VPD_PF_FILE);
else
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_VF_FILE);
+ LANCER_VPD_VF_FILE);
} else {
return BE_READ_SEEPROM_LEN;
}
}
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
- uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
else
return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
}
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
switch (flow_type) {
case TCP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case TCP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct be_rx_obj *rxo;
int status = 0, i, j;
u8 rsstable[128];
- u32 rss_flags = adapter->rss_flags;
+ u32 rss_flags = adapter->rss_info.rss_flags;
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return -EINVAL;
}
- if (rss_flags == adapter->rss_flags)
+ if (rss_flags == adapter->rss_info.rss_flags)
return status;
if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}
}
}
- status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+ status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+ rss_flags, 128, adapter->rss_info.rss_hkey);
if (!status)
- adapter->rss_flags = rss_flags;
+ adapter->rss_info.rss_flags = rss_flags;
return status;
}
@@ -1103,6 +1090,69 @@ static int be_set_channels(struct net_device *netdev,
return be_update_queues(adapter);
}
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct rss_info *rss = &adapter->rss_info;
+
+ if (indir) {
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+ indir[i] = rss->rss_queue[i];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+ return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *hkey)
+{
+ int rc = 0, i, j;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+ if (indir) {
+ struct be_rx_obj *rxo;
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+ j = indir[i];
+ rxo = &adapter->rx_obj[j];
+ rsstable[i] = rxo->rss_id;
+ adapter->rss_info.rss_queue[i] = j;
+ }
+ } else {
+ memcpy(rsstable, adapter->rss_info.rsstable,
+ RSS_INDIR_TABLE_LEN);
+ }
+
+ if (!hkey)
+ hkey = adapter->rss_info.rss_hkey;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
+ if (rc) {
+ adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+ return -EIO;
+ }
+ memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+ memcpy(adapter->rss_info.rsstable, rsstable,
+ RSS_INDIR_TABLE_LEN);
+ return 0;
+}
+
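These four hooks expose the RSS state to user space: ethtool -x ethX sizes its buffers via get_rxfh_indir_size()/get_rxfh_key_size() and then reads the table and key through get_rxfh(), while ethtool -X ethX lands in set_rxfh(). Note the deliberate failure mode: if be_cmd_rss_config() fails, set_rxfh() records RSS_ENABLE_NONE so the cached driver state reflects that RSS is actually off in hardware.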
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_rxfh_indir_size = be_get_rxfh_indir_size,
+ .get_rxfh_key_size = be_get_rxfh_key_size,
+ .get_rxfh = be_get_rxfh,
+ .set_rxfh = be_set_rxfh,
.get_channels = be_get_channels,
.set_channels = be_set_channels
};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3bd198550edb..8840c64aaeca 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -188,10 +188,14 @@
#define OPTYPE_FCOE_FW_ACTIVE 10
#define OPTYPE_FCOE_FW_BACKUP 11
#define OPTYPE_NCSI_FW 13
+#define OPTYPE_REDBOOT_DIR 18
+#define OPTYPE_REDBOOT_CONFIG 19
+#define OPTYPE_SH_PHY_FW 21
+#define OPTYPE_FLASHISM_JUMPVECTOR 22
+#define OPTYPE_UFI_DIR 23
#define OPTYPE_PHY_FW 99
#define TN_8022 13
-#define ILLEGAL_IOCTL_REQ 2
#define FLASHROM_OPER_PHY_FLASH 9
#define FLASHROM_OPER_PHY_SAVE 10
#define FLASHROM_OPER_FLASH 1
@@ -250,6 +254,9 @@
#define IMAGE_FIRMWARE_BACKUP_FCoE 178
#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
#define IMAGE_FIRMWARE_PHY 192
+#define IMAGE_REDBOOT_DIR 208
+#define IMAGE_REDBOOT_CONFIG 209
+#define IMAGE_UFI_DIR 210
#define IMAGE_BOOT_CODE 224
/************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
u32 image_size;
u32 cksum;
u32 entry_point;
- u32 rsvd0;
+ u16 optype;
+ u16 rsvd0;
u32 rsvd1;
u8 ver_data[32];
} __packed;
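Carving the old u32 rsvd0 into a u16 optype plus a u16 pad is only safe because the two halves occupy exactly the bytes the reserved word used to, so older UFI images (which leave the field as 0xFFFF, as the Skyhawk flash path below relies on) still parse. A compile-time check along these lines would catch a layout regression (illustrative sketch; it assumes the full entry, including the leading fields not shown in this hunk, is 64 bytes on flash):

	/* Illustrative only: the u16 + u16 split must not change the
	 * on-flash layout of a UFI section entry. */
	static inline void fsec_entry_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct flash_section_entry) != 64);
	}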
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5dec77..34a26e42f19d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- u16 len, u16 entry_size)
+ u16 len, u16 entry_size)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
u32 reg, enabled;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
- &reg);
+ &reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
return;
pci_write_config_dword(adapter->pdev,
- PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
- val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- DB_EQ_RING_ID_EXT_MASK_SHIFT);
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_error)
return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
- if (be_roce_supported(adapter)) {
+ if (be_roce_supported(adapter)) {
drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
{
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_pport_stats *pport_stats =
- pport_stats_from_cmd(adapter);
+ struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
}
static void populate_erx_stats(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u32 erx_stat)
+ struct be_rx_obj *rxo, u32 erx_stat)
{
if (!BEx_chip(adapter))
rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
}
static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs,
+ bool stopped)
{
struct be_tx_stats *stats = tx_stats(txo);
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
u8 vlan_prio;
u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len,
+ bool skip_hw_vlan)
{
u16 vlan_tag, proto;
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- bool unmap_single)
+ bool unmap_single)
{
dma_addr_t dma;
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
- struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
- *skip_hw_vlan = true;
+ *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN))) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
static int be_vid_config(struct be_adapter *adapter)
{
u16 vids[BE_NUM_VLANS_SUPPORTED];
- u16 num = 0, i;
+ u16 num = 0, i = 0;
int status = 0;
/* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
- for (i = 0; i < VLAN_N_VID; i++)
- if (adapter->vlan_tag[i])
- vids[num++] = cpu_to_le16(i);
-
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vids, num, 0);
+ for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ vids[num++] = cpu_to_le16(i);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
if (status) {
/* Set to VLAN promisc mode as setting VLAN filter failed */
- if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ if (addl_status(status) ==
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
goto set_vlan_promisc;
dev_err(&adapter->pdev->dev,
"Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
return status;
- if (adapter->vlan_tag[vid])
+ if (test_bit(vid, adapter->vids))
return status;
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
if (status) {
adapter->vlans_added--;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
}
return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
goto ret;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
ret:
return status;
}
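The vlan_tag[] array replaced above cost 4096 u8 flags and a full-range scan on every reconfiguration; the adapter->vids bitmap cuts the footprint to 512 bytes and lets be_vid_config() visit only configured IDs via for_each_set_bit(). The idiom in isolation (sketch; vid_bitmap_demo is not driver code):

	/* Sketch: bitmap bookkeeping as adopted above. */
	static void vid_bitmap_demo(void)
	{
		DECLARE_BITMAP(vids, VLAN_N_VID);	/* 4096 bits == 512 bytes */
		unsigned int vid;

		bitmap_zero(vids, VLAN_N_VID);
		set_bit(100, vids);			/* VLAN 100 added */
		for_each_set_bit(vid, vids, VLAN_N_VID)	/* only set bits visited */
			pr_info("vlan %u active\n", vid);
		clear_bit(100, vids);			/* VLAN 100 removed */
	}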
@@ -1197,7 +1191,7 @@ ret:
static void be_clear_promisc(struct be_adapter *adapter)
{
adapter->promiscuous = false;
- adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+ adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > be_max_mc(adapter)) {
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
- goto done;
- }
+ netdev_mc_count(netdev) > be_max_mc(adapter))
+ goto set_mcast_promisc;
if (netdev_uc_count(netdev) != adapter->uc_macs) {
struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
}
status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-
- /* Set to MCAST promisc mode if setting MULTICAST address fails */
- if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ if (!status) {
+ if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
+ goto done;
}
+
+set_mcast_promisc:
+ if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ return;
+
+ /* Set to MCAST promisc mode if setting MULTICAST address fails
+ * or if num configured exceeds what we support
+ */
+ status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ if (!status)
+ adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
return;
}
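Note the effect of the new BE_FLAGS_MCAST_PROMISC flag in the rewritten flow: once the adapter has fallen back to ALLMULTI, either because the configured multicast count exceeds what the hardware supports or because programming the MULTICAST list failed, subsequent rx-mode updates return early instead of re-issuing the same filter command, and the flag is cleared again only after a MULTICAST programming succeeds.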
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
- mac, vf);
+ mac, vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
}
static int be_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *vi)
+ struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = vf_cfg->tx_rate;
+ vi->max_tx_rate = vf_cfg->tx_rate;
+ vi->min_tx_rate = 0;
vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
return status;
}
-static int be_set_vf_tx_rate(struct net_device *netdev,
- int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
+ struct device *dev = &adapter->pdev->dev;
+ int percent_rate, status = 0;
+ u16 link_speed = 0;
+ u8 link_status;
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate < 100 || rate > 10000) {
- dev_err(&adapter->pdev->dev,
- "tx rate must be between 100 and 10000 Mbps\n");
+ if (min_tx_rate)
return -EINVAL;
+
+ if (!max_tx_rate)
+ goto config_qos;
+
+ status = be_cmd_link_status_query(adapter, &link_speed,
+ &link_status, 0);
+ if (status)
+ goto err;
+
+ if (!link_status) {
+ dev_err(dev, "TX-rate setting not allowed when link is down\n");
+ status = -EPERM;
+ goto err;
+ }
+
+ if (max_tx_rate < 100 || max_tx_rate > link_speed) {
+ dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
+ link_speed);
+ status = -EINVAL;
+ goto err;
+ }
+
+ /* On Skyhawk the QOS setting must be done only as a % value */
+ percent_rate = link_speed / 100;
+ if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
+ dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
+ percent_rate);
+ status = -EINVAL;
+ goto err;
}
- status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
+config_qos:
+ status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
if (status)
- dev_err(&adapter->pdev->dev,
- "tx rate %d on VF %d failed\n", rate, vf);
- else
- adapter->vf_cfg[vf].tx_rate = rate;
+ goto err;
+
+ adapter->vf_cfg[vf].tx_rate = max_tx_rate;
+ return 0;
+
+err:
+ dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
+ max_tx_rate, vf);
return status;
}
static int be_set_vf_link_state(struct net_device *netdev, int vf,
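Two things change in the VF rate hook above: the driver now validates against the queried link speed rather than a hard-coded 10000, and on Skyhawk the rate must land on a whole percent of that speed. Worked through for a 10G link: percent_rate = 10000 / 100 = 100, so max_tx_rate = 2500 is accepted while 2550 fails the (max_tx_rate % percent_rate) test with -EINVAL. With the .ndo_set_vf_rate hookup later in this patch, the value typically arrives from iproute2 builds that support the split rate API, e.g. "ip link set <dev> vf 0 max_tx_rate 2500"; any non-zero min_tx_rate is rejected by this driver.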
@@ -1469,7 +1505,7 @@ modify_eqd:
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_stats *stats = rx_stats(rxo);
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+ curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlan_tag = swab16(rxcp->vlan_tag);
if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
- !adapter->vlan_tag[rxcp->vlan_tag])
+ !test_bit(rxcp->vlan_tag, adapter->vids))
rxcp->vlanf = 0;
}
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 last_index)
+ struct be_tx_obj *txo, u16 last_index)
{
struct be_queue_info *txq = &txo->q;
struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
+ sizeof(struct be_eq_entry));
if (rc)
return rc;
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- sizeof(struct be_mcc_compl)))
+ sizeof(struct be_mcc_compl)))
goto err;
/* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
rxo->adapter = adapter;
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
+ sizeof(struct be_eth_rx_compl));
if (rc)
return rc;
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
* promiscuous mode on some SKUs
* promiscuous mode on some SKUs
*/
if (unlikely(rxcp->port != adapter->port_num &&
- !lancer_chip(adapter))) {
+ !lancer_chip(adapter))) {
be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
if (!txcp)
break;
num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp));
+ AMAP_GET_BITS(struct
+ amap_eth_tx_compl,
+ wrb_index, txcp));
}
if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
/* As Tx wrbs have been freed up, wake up netdev queue
* if it was stopped due to lack of tx wrbs. */
if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, idx);
}
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
+ SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ SLIPORT_ERROR2_OFFSET);
adapter->hw_error = true;
/* Do not log error messages if it's a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
}
} else {
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
+ PCICFG_UE_STATUS_LOW, &ue_lo);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
+ PCICFG_UE_STATUS_HIGH, &ue_hi);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2662,7 @@ fail:
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eqo)
+ struct be_eq_obj *eqo)
{
return adapter->msix_entries[eqo->msix_idx].vector;
}
@@ -2648,7 +2686,7 @@ err_msix:
for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
- status);
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2774,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rsstable[128];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+ struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN;
+ j += adapter->num_rx_qs - 1) {
for_all_rss_queues(adapter, rxo, i) {
- if ((j + i) >= 128)
+ if ((j + i) >= RSS_INDIR_TABLE_LEN)
break;
- rsstable[j + i] = rxo->rss_id;
+ rss->rsstable[j + i] = rxo->rss_id;
+ rss->rss_queue[j + i] = i;
}
}
- adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
if (!BEx_chip(adapter))
- adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
- RSS_ENABLE_UDP_IPV6;
+ rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
} else {
/* Disable RSS, if only default RX Q is created */
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
}
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
+ get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss_hkey);
if (rc) {
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
+ memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
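The queue-creation path above now also records, per indirection-table slot, which RSS queue the slot points at (rss_queue[]), so be_get_rxfh() can report the table in queue numbers rather than opaque hardware rss_ids, and it seeds a fresh random hash key via get_random_bytes() each time the queues are built. The nested fill reduces to a simple modulo walk; a condensed equivalent (sketch; rss_id_of() is a hypothetical accessor standing in for the rxo->rss_id lookup):

	/* Equivalent single-loop form of the round-robin fill above:
	 * slot k maps to RSS queue (k % num_rss), with
	 * num_rss == adapter->num_rx_qs - 1 (the default queue excluded). */
	for (k = 0; k < RSS_INDIR_TABLE_LEN; k++) {
		rss->rss_queue[k] = k % num_rss;
		rss->rsstable[k] = rss_id_of(adapter, k % num_rss);
	}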
@@ -2896,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (enable) {
status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ PCICFG_PM_CONTROL_OFFSET,
+ PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
@@ -2905,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr, &cmd);
+ adapter->netdev->dev_addr,
+ &cmd);
pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
@@ -2944,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address assignment failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n",
+ vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3086,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+ status =
+ be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -3119,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
u32 privileges;
- u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3175,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
vf);
}
- /* BE3 FW, by default, caps VF TX-rate to 100mbps.
- * Allow full available bandwidth
- */
- if (BE3_chip(adapter) && !old_vfs)
- be_cmd_config_qos(adapter, 1000, vf + 1);
-
- status = be_cmd_link_status_query(adapter, &lnk_speed,
- NULL, vf + 1);
- if (!status)
- vf_cfg->tx_rate = lnk_speed;
+ /* Allow full available bandwidth */
+ if (!old_vfs)
+ be_cmd_config_qos(adapter, 0, 0, vf + 1);
if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
@@ -3213,9 +3254,9 @@ err:
static u8 be_convert_mc_type(u32 function_mode)
{
- if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+ if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
return vNIC1;
- else if (function_mode & FLEX10_MODE)
+ else if (function_mode & QNQ_MODE)
return FLEX10;
else if (function_mode & VNIC_MODE)
return vNIC2;
@@ -3590,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
}
#endif
-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
-
-static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p, u32 img_start, int image_size,
- int hdr_size)
-{
- u32 crc_offset;
- u8 flashed_crc[4];
- int status;
-
- crc_offset = hdr_size + img_start + image_size - 4;
-
- p += crc_offset;
-
- status = be_cmd_get_flash_crc(adapter, flashed_crc,
- (image_size - 4));
- if (status) {
- dev_err(&adapter->pdev->dev,
- "could not get crc from flash, not flashing redboot\n");
- return false;
- }
-
- /*update redboot only if crc does not match*/
- if (!memcmp(flashed_crc, p, 4))
- return false;
- else
- return true;
-}
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3649,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- int header_size,
- const struct firmware *fw)
+ int header_size,
+ const struct firmware *fw)
{
struct flash_section_info *fsec = NULL;
const u8 *p = fw->data;
@@ -3665,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
return NULL;
}
+static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
+ u32 img_offset, u32 img_size, int hdr_size,
+ u16 img_optype, bool *crc_match)
+{
+ u32 crc_offset;
+ int status;
+ u8 crc[4];
+
+ status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
+ if (status)
+ return status;
+
+ crc_offset = hdr_size + img_offset + img_size - 4;
+
+ /* Skip flashing if the CRC of the flashed region matches */
+ if (!memcmp(crc, p + crc_offset, 4))
+ *crc_match = true;
+ else
+ *crc_match = false;
+
+ return status;
+}
+
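be_check_flash_crc() folds the old redboot-only test into a helper both flash paths can use: fetch the CRC of the currently flashed region over MCC, compare it against the last four bytes of the candidate image, and report the verdict through *crc_match. Callers then reduce to the same skip-if-unchanged shape (sketch of the calling pattern used below):

	/* Calling shape shared by the BEx and Skyhawk paths below. */
	status = be_check_flash_crc(adapter, fw->data, img_offset, img_size,
				    hdr_size, img_optype, &crc_match);
	if (!status && crc_match)
		continue;	/* region already holds this image; skip it */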
static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size)
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
{
- u32 total_bytes = 0, flash_op, num_bytes = 0;
- int status = 0;
struct be_cmd_write_flashrom *req = flash_cmd->va;
+ u32 total_bytes, flash_op, num_bytes;
+ int status;
total_bytes = img_size;
while (total_bytes) {
@@ -3693,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
memcpy(req->data_buf, img, num_bytes);
img += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, num_bytes);
- if (status) {
- if (status == ILLEGAL_IOCTL_REQ &&
- optype == OPTYPE_PHY_FW)
- break;
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed.\n");
+ flash_op, num_bytes);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ else if (status)
return status;
- }
}
return 0;
}
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd,
- int num_of_images)
-
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
- const u8 *p = fw->data;
- const struct flash_comp *pflashcomp;
- int num_comp, redboot;
+ struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
+ int status, i, filehdr_size, num_comp;
+ const struct flash_comp *pflashcomp;
+ bool crc_match;
+ const u8 *p;
struct flash_comp gen3_flash_types[] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3775,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
/* Get flash section info */
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
- dev_err(&adapter->pdev->dev,
- "Invalid Cookie. UFI corrupted ?\n");
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
return -1;
}
for (i = 0; i < num_comp; i++) {
@@ -3792,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
continue;
if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
- redboot = be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size + img_hdrs_size);
- if (!redboot)
+ status = be_check_flash_crc(adapter, fw->data,
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size,
+ OPTYPE_REDBOOT, &crc_match);
+ if (status) {
+ dev_err(dev,
+ "Could not get CRC for 0x%x region\n",
+ pflashcomp[i].optype);
+ continue;
+ }
+
+ if (crc_match)
continue;
}
- p = fw->data;
- p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
+ p = fw->data + filehdr_size + pflashcomp[i].offset +
+ img_hdrs_size;
if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size);
+ pflashcomp[i].size);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Flashing section type %d failed.\n",
+ dev_err(dev, "Flashing section type 0x%x failed\n",
pflashcomp[i].img_type);
return status;
}
@@ -3816,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
return 0;
}
+static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
+{
+ u32 img_type = le32_to_cpu(fsec_entry.type);
+ u16 img_optype = le16_to_cpu(fsec_entry.optype);
+
+ if (img_optype != 0xFFFF)
+ return img_optype;
+
+ switch (img_type) {
+ case IMAGE_FIRMWARE_iSCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCoE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_iSCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ case IMAGE_FLASHISM_JUMPVECTOR:
+ img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
+ break;
+ case IMAGE_FIRMWARE_PHY:
+ img_optype = OPTYPE_SH_PHY_FW;
+ break;
+ case IMAGE_REDBOOT_DIR:
+ img_optype = OPTYPE_REDBOOT_DIR;
+ break;
+ case IMAGE_REDBOOT_CONFIG:
+ img_optype = OPTYPE_REDBOOT_CONFIG;
+ break;
+ case IMAGE_UFI_DIR:
+ img_optype = OPTYPE_UFI_DIR;
+ break;
+ default:
+ break;
+ }
+
+ return img_optype;
+}
+
static int be_flash_skyhawk(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status = 0, i, filehdr_size = 0;
- int img_offset, img_size, img_optype, redboot;
int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
- const u8 *p = fw->data;
+ struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
+ u32 img_offset, img_size, img_type;
+ int status, i, filehdr_size;
+ bool crc_match, old_fw_img;
+ u16 img_optype;
+ const u8 *p;
filehdr_size = sizeof(struct flash_file_hdr_g3);
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
- dev_err(&adapter->pdev->dev,
- "Invalid Cookie. UFI corrupted ?\n");
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
return -1;
}
for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+ img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+ old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
- switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
- case IMAGE_FIRMWARE_iSCSI:
- img_optype = OPTYPE_ISCSI_ACTIVE;
- break;
- case IMAGE_BOOT_CODE:
- img_optype = OPTYPE_REDBOOT;
- break;
- case IMAGE_OPTION_ROM_ISCSI:
- img_optype = OPTYPE_BIOS;
- break;
- case IMAGE_OPTION_ROM_PXE:
- img_optype = OPTYPE_PXE_BIOS;
- break;
- case IMAGE_OPTION_ROM_FCoE:
- img_optype = OPTYPE_FCOE_BIOS;
- break;
- case IMAGE_FIRMWARE_BACKUP_iSCSI:
- img_optype = OPTYPE_ISCSI_BACKUP;
- break;
- case IMAGE_NCSI:
- img_optype = OPTYPE_NCSI_FW;
- break;
- default:
+ if (img_optype == 0xFFFF)
continue;
+ /* Don't bother verifying CRC if an old FW image is being
+ * flashed
+ */
+ if (old_fw_img)
+ goto flash;
+
+ status = be_check_flash_crc(adapter, fw->data, img_offset,
+ img_size, filehdr_size +
+ img_hdrs_size, img_optype,
+ &crc_match);
+ /* The current FW image on the card does not recognize the new
+ * FLASH op_type. The FW download is partially complete.
+ * Reboot the server now to enable the FW image to recognize the
+ * new FLASH op_type. To complete the remaining process,
+ * download the same FW again after the reboot.
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+ dev_err(dev, "Flash incomplete. Reset the server\n");
+ dev_err(dev, "Download FW image again after reset\n");
+ return -EAGAIN;
+ } else if (status) {
+ dev_err(dev, "Could not get CRC for 0x%x region\n",
+ img_optype);
+ return -EFAULT;
}
- if (img_optype == OPTYPE_REDBOOT) {
- redboot = be_flash_redboot(adapter, fw->data,
- img_offset, img_size,
- filehdr_size + img_hdrs_size);
- if (!redboot)
- continue;
- }
+ if (crc_match)
+ continue;
- p = fw->data;
- p += filehdr_size + img_offset + img_hdrs_size;
+flash:
+ p = fw->data + filehdr_size + img_offset + img_hdrs_size;
if (p + img_size > fw->data + fw->size)
return -1;
status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Flashing section type %d failed.\n",
- fsec->fsec_entry[i].type);
- return status;
+ /* For old FW images ignore ILLEGAL_FIELD error or errors on
+ * UFI_DIR region
+ */
+ if (old_fw_img &&
+ (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+ (img_optype == OPTYPE_UFI_DIR &&
+ base_status(status) == MCC_STATUS_FAILED))) {
+ continue;
+ } else if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ img_type);
+ return -EFAULT;
}
}
return 0;
}
static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
+ const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev,
"Firmware load error. "
@@ -3976,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
} else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_err(&adapter->pdev->dev,
- "System reboot required for new FW"
- " to be active\n");
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW to be active\n");
}
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
switch (ufi_type) {
case UFI_TYPE4:
status = be_flash_skyhawk(adapter, fw,
- &flash_cmd, num_imgs);
+ &flash_cmd, num_imgs);
break;
case UFI_TYPE3R:
status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4213,7 @@ fw_exit:
return status;
}
-static int be_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -4155,8 +4255,7 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask)
+ struct net_device *dev, u32 filter_mask)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4254,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
- .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
+ .ndo_set_vf_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4301,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+ netdev->ethtool_ops = &be_ethtool_ops;
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phy, ifr, cmd);
}
-static int ethoc_config(struct net_device *dev, struct ifmap *map)
-{
- return -ENOSYS;
-}
-
static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
.ndo_do_ioctl = ethoc_ioctl,
- .ndo_set_config = ethoc_config,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 68069eabc4f8..c77fa4a69844 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+ netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8be5b40c0a12..4ff1adc6bfca 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
}
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+ netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
platform_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6048dc8604ee..270308315d43 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -67,6 +67,7 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
depends on FSL_SOC
select PHYLIB
+ select OF_MDIO
---help---
This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d19ff05..671d080105a7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
#define BD_ENET_TX_RCMASK ((ushort)0x003c)
#define BD_ENET_TX_UN ((ushort)0x0002)
#define BD_ENET_TX_CSL ((ushort)0x0001)
-#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
/*enhanced buffer descriptor control/status used by Ethernet transmit*/
#define BD_ENET_TX_INT 0x40000000
@@ -246,8 +246,8 @@ struct bufdesc_ex {
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 16 /* Must be power of two */
-#define TX_RING_MOD_MASK 15 /* for this to work */
+#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_MOD_MASK 511 /* for this to work */
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
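The TX ring can grow from 16 to 512 entries because index wraparound is done by masking, which is why the comment insists on a power of two; 512 entries also cover the worst case of the software TSO path added below (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS descriptors per skb). The mask idiom the comment refers to, in isolation:

	/* Why TX_RING_SIZE must be a power of two: advance by masking. */
	index = (index + 1) & TX_RING_MOD_MASK;	/* same as % TX_RING_SIZE */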
@@ -296,8 +296,15 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
+ unsigned short bufdesc_size;
unsigned short tx_ring_size;
unsigned short rx_ring_size;
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ /* Software TSO */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8d69e439f0c5..77037fd377b8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -36,6 +36,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
@@ -54,6 +55,7 @@
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/cacheflush.h>
@@ -172,10 +174,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
-#error "FEC: descriptor ring size constants too large"
-#endif
-
/* Interrupt events/masks. */
#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
@@ -231,6 +229,15 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define FEC_MAX_TSO_SEGS 100
+#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
static int mii_cnt;
static inline
@@ -286,6 +293,22 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
+static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
+ struct fec_enet_private *fep)
+{
+ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+}
+
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+{
+ int entries;
+
+ entries = ((const char *)fep->dirty_tx -
+ (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+
+ return entries > 0 ? entries : entries + fep->tx_ring_size;
+}
+
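fec_enet_get_free_txdesc_num() measures the byte distance from cur_tx back to dirty_tx and divides by bufdesc_size, which works for both the legacy and the extended descriptor layouts; the -1 keeps one slot permanently unused so a completely full ring stays distinguishable from an empty one. Worked example on a 512-entry ring: with dirty_tx at index 5 and cur_tx at index 10 (5 descriptors in flight), entries = 5 - 10 - 1 = -6, and the negative result wraps to -6 + 512 = 506 free descriptors.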
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -297,6 +320,11 @@ static void *swap_buffer(void *bufaddr, int len)
return bufaddr;
}
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
@@ -307,33 +335,134 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(skb_cow_head(skb, 0)))
return -1;
+ if (is_ipv4_pkt(skb))
+ ip_hdr(skb)->check = 0;
*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
return 0;
}
-static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void
+fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
+{
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *bdp_pre;
+
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+ if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+ !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+ fep->delay_work.trig_tx = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work),
+ msecs_to_jiffies(1));
+ }
+}
+
+static int
+fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp, *bdp_pre;
- void *bufaddr;
- unsigned short status;
+ struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc_ex *ebdp;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int frag, frag_len;
+ unsigned short status;
+ unsigned int estatus = 0;
+ skb_frag_t *this_frag;
unsigned int index;
+ void *bufaddr;
+ int i;
- /* Fill in a Tx ring entry */
+ for (frag = 0; frag < nr_frags; frag++) {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ frag_len = skb_shinfo(skb)->frags[frag].size;
+
+ /* Handle the last BD specially */
+ if (frag == nr_frags - 1) {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus |= BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, frag_len);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+ frag_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ goto dma_mapping_error;
+ }
+
+ bdp->cbd_datlen = frag_len;
+ bdp->cbd_sc = status;
+ }
+
+ fep->cur_tx = bdp;
+
+ return 0;
+
+dma_mapping_error:
bdp = fep->cur_tx;
+ for (i = 0; i < frag; i++) {
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ }
+ return NETDEV_TX_OK;
+}
- status = bdp->cbd_sc;
+static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct bufdesc *bdp, *last_bdp;
+ void *bufaddr;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned int estatus = 0;
+ unsigned int index;
+ int entries_free;
+ int ret;
- if (status & BD_ENET_TX_READY) {
- /* Ooops. All transmit buffers are full. Bail out.
- * This should not happen, since ndev->tbusy should be set.
- */
- netdev_err(ndev, "tx queue full!\n");
- return NETDEV_TX_BUSY;
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for SG!\n");
+ return NETDEV_TX_OK;
}
/* Protocol checksum off-load for TCP and UDP. */
@@ -342,102 +471,300 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
- /* Clear all of the status flags */
+ /* Fill in a Tx ring entry */
+ bdp = fep->cur_tx;
+ status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
bufaddr = skb->data;
- bdp->cbd_datlen = skb->len;
+ buflen = skb_headlen(skb);
- /*
- * On some FEC implementations data must be aligned on
- * 4-byte boundaries. Use bounce buffers to copy data
- * and get it aligned. Ugh.
- */
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
-
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
- memcpy(fep->tx_bounce[index], skb->data, skb->len);
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], skb->data, buflen);
bufaddr = fep->tx_bounce[index];
- }
- /*
- * Some design made an incorrect assumption on endian mode of
- * the system that it's running on. As the result, driver has to
- * swap every frame going to and coming from the controller.
- */
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
- swap_buffer(bufaddr, skb->len);
-
- /* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, buflen);
+ }
/* Push the data cache so the CPM does not get stale memory
* data.
*/
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
- skb->len, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
- bdp->cbd_bufaddr = 0;
- fep->tx_skbuff[index] = NULL;
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
+ if (nr_frags) {
+ ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+ if (ret)
+ return ret;
+ } else {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus = BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_bdu = 0;
+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- fep->hwts_tx_en)) {
- ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ fep->hwts_tx_en))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- ebdp->cbd_esc = BD_ENET_TX_INT;
- /* Enable protocol checksum flags
- * We do not bother with the IP Checksum bits as they
- * are done by the kernel
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- ebdp->cbd_esc |= BD_ENET_TX_PINS;
- }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
}
+ last_bdp = fep->cur_tx;
+ index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+ /* Save skb pointer */
+ fep->tx_skbuff[index] = skb;
+
+ bdp->cbd_datlen = buflen;
+
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- bdp_pre = fec_enet_get_prevdesc(bdp, fep);
- if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
- !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
- fep->delay_work.trig_tx = true;
- schedule_delayed_work(&(fep->delay_work.delay_work),
- msecs_to_jiffies(1));
- }
+ fec_enet_submit_work(bdp, fep);
/* If this was the last BD in the ring, start at the beginning again. */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(last_bdp, fep);
skb_tx_timestamp(skb);
fep->cur_tx = bdp;
- if (fep->cur_tx == fep->dirty_tx)
- netif_stop_queue(ndev);
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ bdp->cbd_datlen = size;
+
+ if (((unsigned long) data) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], data, size);
+ data = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, size);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ /* Handle the last BD specially */
+ if (last_tcp)
+ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ if (is_last) {
+ status |= BD_ENET_TX_INTR;
+ if (fep->bufdesc_ex)
+ ebdp->cbd_esc |= BD_ENET_TX_INT;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ void *bufaddr;
+ unsigned long dmabuf;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, hdr_len);
+
+ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
+ hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ bdp->cbd_bufaddr = dmabuf;
+ bdp->cbd_datlen = hdr_len;
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int total_len, data_left;
+ struct bufdesc *bdp = fep->cur_tx;
+ struct tso_t tso;
+ unsigned int index = 0;
+ int ret;
+
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for TSO!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+ if (ret)
+ goto err_release;
+
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
+ size, size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ }
+
+ /* Save skb pointer */
+ fep->tx_skbuff[index] = skb;
+
+ fec_enet_submit_work(bdp, fep);
+
+ skb_tx_timestamp(skb);
+ fep->cur_tx = bdp;
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ return 0;
+
+err_release:
+ /* TODO: Release all used data descriptors for TSO */
+ return ret;
+}
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int entries_free;
+ int ret;
+
+ if (skb_is_gso(skb))
+ ret = fec_enet_txq_submit_tso(skb, ndev);
+ else
+ ret = fec_enet_txq_submit_skb(skb, ndev);
+ if (ret)
+ return ret;
+
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free <= fep->tx_stop_threshold)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
}
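fec_enet_start_xmit() is now a thin dispatcher: GSO skbs go through the software TSO path, everything else through the scatter-gather path, and the queue is stopped once free descriptors fall to tx_stop_threshold (with tx_wake_threshold providing hysteresis on the completion side). The TSO submission follows the generic shape of the <net/tso.h> helpers it builds on (condensed sketch; queue_hdr/queue_data are hypothetical stand-ins for the fec_enet_txq_put_* calls above):

	/* Condensed shape of fec_enet_txq_submit_tso() above. */
	struct tso_t tso;
	int total_len = skb->len - hdr_len;

	tso_start(skb, &tso);			/* parse MAC/IP/TCP headers */
	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);

		total_len -= data_left;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		queue_hdr(hdr);			/* per-segment header BD */
		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			queue_data(tso.data, size);	/* payload BD(s) */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}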
@@ -756,6 +1083,7 @@ fec_enet_tx(struct net_device *ndev)
unsigned short status;
struct sk_buff *skb;
int index = 0;
+ int entries_free;
fep = netdev_priv(ndev);
bdp = fep->dirty_tx;
@@ -769,16 +1097,17 @@ fec_enet_tx(struct net_device *ndev)
if (bdp == fep->cur_tx)
break;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
skb = fep->tx_skbuff[index];
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
- DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
+ if (!skb) {
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ continue;
+ }
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -797,7 +1126,7 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += bdp->cbd_datlen;
+ ndev->stats.tx_bytes += skb->len;
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -834,15 +1163,15 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (fep->dirty_tx != fep->cur_tx) {
- if (netif_queue_stopped(ndev))
+ if (netif_queue_stopped(ndev)) {
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free >= fep->tx_wake_threshold)
netif_wake_queue(ndev);
}
}
return;
}
-
/* During a receive, the cur_rx points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -920,11 +1249,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->rx_bd_base;
- else
- index = bdp - fep->rx_bd_base;
+ index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
data = fep->rx_skbuff[index]->data;
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1255,6 +1580,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
+ } else {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+ }
+
+ return 0;
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+
+ return ret;
+}
+
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1364,7 +1732,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 * Reference Manual has an error on this, which is fixed in the
 * i.MX6Q documentation.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
@@ -1773,6 +2141,11 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ return ret;
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1811,6 +2184,8 @@ fec_enet_close(struct net_device *ndev)
phy_disconnect(fep->phy_dev);
}
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
fec_enet_free_buffers(ndev);
return 0;
@@ -1988,13 +2363,35 @@ static int fec_enet_init(struct net_device *ndev)
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *cbd_base;
+ int bd_size;
+
+ /* init the tx & rx ring size */
+ fep->tx_ring_size = TX_RING_SIZE;
+ fep->rx_ring_size = RX_RING_SIZE;
+
+ fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
+
+ if (fep->bufdesc_ex)
+ fep->bufdesc_size = sizeof(struct bufdesc_ex);
+ else
+ fep->bufdesc_size = sizeof(struct bufdesc);
+ bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
+ fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+ cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
GFP_KERNEL);
if (!cbd_base)
return -ENOMEM;
+ fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
+ &fep->tso_hdrs_dma, GFP_KERNEL);
+ if (!fep->tso_hdrs) {
+ dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
+ return -ENOMEM;
+ }
+
	memset(cbd_base, 0, bd_size);
fep->netdev = ndev;
@@ -2004,10 +2401,6 @@ static int fec_enet_init(struct net_device *ndev)
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
- /* init the tx & rx ring size */
- fep->tx_ring_size = TX_RING_SIZE;
- fep->rx_ring_size = RX_RING_SIZE;
-
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
if (fep->bufdesc_ex)
@@ -2024,21 +2417,21 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
+ if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+ ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
- ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
+ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
+ ndev->hw_features = ndev->features;
+
fec_restart(ndev, 0);
return 0;
@@ -2114,6 +2507,9 @@ fec_probe(struct platform_device *pdev)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fep->hwp = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2560,10 @@ fec_probe(struct platform_device *pdev)
fep->bufdesc_ex = 0;
}
- ret = clk_prepare_enable(fep->clk_ahb);
+ ret = fec_enet_clk_enable(ndev, true);
if (ret)
goto failed_clk;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
-
- if (fep->clk_ptp) {
- ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
- goto failed_clk_ptp;
- }
-
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2605,8 @@ fec_probe(struct platform_device *pdev)
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
ret = register_netdev(ndev);
if (ret)
@@ -2244,15 +2626,7 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
-failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
failed_clk:
failed_ioremap:
free_netdev(ndev);
@@ -2272,14 +2646,9 @@ fec_drv_remove(struct platform_device *pdev)
del_timer_sync(&fep->time_keep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
free_netdev(ndev);
return 0;
@@ -2296,12 +2665,8 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -2322,25 +2687,10 @@ fec_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fep->clk_ahb);
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
if (ret)
- goto failed_clk_ahb;
-
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
-
- if (fep->clk_ptp) {
- ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
- goto failed_clk_ptp;
- }
+ goto failed_clk;
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2699,7 @@ fec_resume(struct device *dev)
return 0;
-failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
- clk_disable_unprepare(fep->clk_ahb);
-failed_clk_ahb:
+failed_clk:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
return ret;
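
The new fec_enet_clk_enable() helper, introduced above and used throughout the rest of this patch, replaces four copies of the same clk_prepare_enable() ladder. The unwind ordering is the point: each failure label undoes only the clocks that were already enabled, in reverse order. A reduced two-clock sketch of the pattern (the clock names are illustrative, not FEC names):

#include <linux/clk.h>

static int demo_clks_on(struct clk *bus, struct clk *per)
{
	int ret;

	ret = clk_prepare_enable(bus);
	if (ret)
		return ret;

	ret = clk_prepare_enable(per);
	if (ret)
		goto err_bus;	/* undo only what already succeeded */

	return 0;

err_bus:
	clk_disable_unprepare(bus);
	return ret;
}
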
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc80db41d6b3..cfaf17b70f3f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -792,10 +792,6 @@ static int fs_init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
iface);
if (!phydev) {
- phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
- iface);
- }
- if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
}
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->use_napi = 1;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
- if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
- NULL)))
- goto out_free_fpi;
+ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(ofdev->dev.of_node);
+ if (err)
+ goto out_free_fpi;
+
+		/* In the case of a fixed PHY, the DT node associated
+		 * with the PHY is the Ethernet MAC DT node.
+ */
+ fpi->phy_node = ofdev->dev.of_node;
+ }
if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
phy_connection_type = of_get_property(ofdev->dev.of_node,
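
fs_enet, gianfar, and ucc_geth all make the same conversion in this series: drop of_phy_connect_fixed_link() and instead register a fixed link from the MAC's own DT node at probe time. A sketch of the resulting lookup, outside any particular driver:

#include <linux/of.h>
#include <linux/of_mdio.h>

/* Sketch of the fixed-link probe pattern; np is the MAC's DT node. */
static struct device_node *demo_get_phy_node(struct device_node *np)
{
	struct device_node *phy_node;
	int err;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (phy_node)
		return phy_node;

	if (of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			return NULL;
		/* With a fixed link, the MAC node itself stands in
		 * for the PHY node.
		 */
		return of_node_get(np);
	}

	return NULL;
}
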
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ee6ddbd4f252..a6cf40e62f3a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -889,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+	/* In the case of a fixed PHY, the DT node associated
+	 * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ goto err_grp_init;
+
+ priv->phy_node = np;
+ }
+
/* Find the TBI PHY. If it's not there, we don't support SGMII */
priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
@@ -1231,7 +1242,7 @@ static void gfar_hw_init(struct gfar_private *priv)
gfar_write_isrg(priv);
}
-static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -1373,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_hw_init(priv);
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
@@ -1380,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
-
device_init_wakeup(&dev->dev,
priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1660,9 +1671,6 @@ static int init_phy(struct net_device *dev)
priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
interface);
- if (!priv->phydev)
- priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
- interface);
if (!priv->phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
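
The gianfar diff above also moves netif_carrier_off() ahead of register_netdev(); once registration returns, userspace may open the device, so the carrier state must already be sane. The ordering in isolation:

#include <linux/netdevice.h>

/* Probe-time ordering sketch: force carrier down before the device
 * becomes visible to userspace via register_netdev().
 */
static int demo_register(struct net_device *dev)
{
	netif_carrier_off(dev);		/* phylib raises it later */
	return register_netdev(dev);	/* device usable after this */
}
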
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c8299c31b21f..fab39e295441 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
priv->phy_interface);
- if (!phydev)
- phydev = of_phy_connect_fixed_link(dev, &adjust_link,
- priv->phy_interface);
if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!ug_info->phy_node) {
+		/* In the case of a fixed PHY, the DT node associated
+		 * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ return err;
+ }
+ ug_info->phy_node = np;
+ }
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 413329eff2ff..cc83350d56ba 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
void uec_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+ netdev->ethtool_ops = &uec_ethtool_ops;
}
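
This and the other SET_ETHTOOL_OPS() conversions in this series are mechanical: the macro expanded to a plain pointer assignment, so drivers now write it directly. For reference:

#include <linux/netdevice.h>

/* SET_ETHTOOL_OPS(netdev, ops) expanded to exactly this assignment. */
static void demo_set_ethtool_ops(struct net_device *netdev,
				 const struct ethtool_ops *ops)
{
	netdev->ethtool_ops = ops;
}
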
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d449fcb90199..0c9d55c862ae 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -162,7 +162,9 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* Return all Fs if nothing was there */
if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
- dev_err(&bus->dev, "MDIO read error\n");
+ dev_err(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
return 0xffff;
}
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7becab1aa3e4..cfe7a7431730 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
dev->netdev_ops = &fjn_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
return fmvj18x_config(link);
} /* fmvj18x_attach */
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 000000000000..e9421731b05e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,27 @@
+#
+# HISILICON device configuration
+#
+
+config NET_VENDOR_HISILICON
+ bool "Hisilicon devices"
+ default y
+ depends on ARM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Hisilicon devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_HISILICON
+
+config HIX5HD2_GMAC
+ tristate "Hisilicon HIX5HD2 Family Network Device Support"
+ select PHYLIB
+ help
+	  This selects the Hisilicon HIX5HD2 family GMAC network device driver.
+
+endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 000000000000..9175e84622d4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 000000000000..0ffdcd381fdd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1066 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/clk.h>
+#include <linux/circ_buf.h>
+
+#define STATION_ADDR_LOW 0x0000
+#define STATION_ADDR_HIGH 0x0004
+#define MAC_DUPLEX_HALF_CTRL 0x0008
+#define MAX_FRM_SIZE 0x003c
+#define PORT_MODE 0x0040
+#define PORT_EN 0x0044
+#define BITS_TX_EN BIT(2)
+#define BITS_RX_EN BIT(1)
+#define REC_FILT_CONTROL 0x0064
+#define BIT_CRC_ERR_PASS BIT(5)
+#define BIT_PAUSE_FRM_PASS BIT(4)
+#define BIT_VLAN_DROP_EN BIT(3)
+#define BIT_BC_DROP_EN BIT(2)
+#define BIT_MC_MATCH_EN BIT(1)
+#define BIT_UC_MATCH_EN BIT(0)
+#define PORT_MC_ADDR_LOW 0x0068
+#define PORT_MC_ADDR_HIGH 0x006C
+#define CF_CRC_STRIP 0x01b0
+#define MODE_CHANGE_EN 0x01b4
+#define BIT_MODE_CHANGE_EN BIT(0)
+#define COL_SLOT_TIME 0x01c0
+#define RECV_CONTROL 0x01e0
+#define BIT_STRIP_PAD_EN BIT(3)
+#define BIT_RUNT_PKT_EN BIT(4)
+#define CONTROL_WORD 0x0214
+#define MDIO_SINGLE_CMD 0x03c0
+#define MDIO_SINGLE_DATA 0x03c4
+#define MDIO_CTRL 0x03cc
+#define MDIO_RDATA_STATUS 0x03d0
+
+#define MDIO_START BIT(20)
+#define MDIO_R_VALID BIT(0)
+#define MDIO_READ (BIT(17) | MDIO_START)
+#define MDIO_WRITE (BIT(16) | MDIO_START)
+
+#define RX_FQ_START_ADDR 0x0500
+#define RX_FQ_DEPTH 0x0504
+#define RX_FQ_WR_ADDR 0x0508
+#define RX_FQ_RD_ADDR 0x050c
+#define RX_FQ_VLDDESC_CNT 0x0510
+#define RX_FQ_ALEMPTY_TH 0x0514
+#define RX_FQ_REG_EN 0x0518
+#define BITS_RX_FQ_START_ADDR_EN BIT(2)
+#define BITS_RX_FQ_DEPTH_EN BIT(1)
+#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
+#define RX_FQ_ALFULL_TH 0x051c
+#define RX_BQ_START_ADDR 0x0520
+#define RX_BQ_DEPTH 0x0524
+#define RX_BQ_WR_ADDR 0x0528
+#define RX_BQ_RD_ADDR 0x052c
+#define RX_BQ_FREE_DESC_CNT 0x0530
+#define RX_BQ_ALEMPTY_TH 0x0534
+#define RX_BQ_REG_EN 0x0538
+#define BITS_RX_BQ_START_ADDR_EN BIT(2)
+#define BITS_RX_BQ_DEPTH_EN BIT(1)
+#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
+#define RX_BQ_ALFULL_TH 0x053c
+#define TX_BQ_START_ADDR 0x0580
+#define TX_BQ_DEPTH 0x0584
+#define TX_BQ_WR_ADDR 0x0588
+#define TX_BQ_RD_ADDR 0x058c
+#define TX_BQ_VLDDESC_CNT 0x0590
+#define TX_BQ_ALEMPTY_TH 0x0594
+#define TX_BQ_REG_EN 0x0598
+#define BITS_TX_BQ_START_ADDR_EN BIT(2)
+#define BITS_TX_BQ_DEPTH_EN BIT(1)
+#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
+#define TX_BQ_ALFULL_TH 0x059c
+#define TX_RQ_START_ADDR 0x05a0
+#define TX_RQ_DEPTH 0x05a4
+#define TX_RQ_WR_ADDR 0x05a8
+#define TX_RQ_RD_ADDR 0x05ac
+#define TX_RQ_FREE_DESC_CNT 0x05b0
+#define TX_RQ_ALEMPTY_TH 0x05b4
+#define TX_RQ_REG_EN 0x05b8
+#define BITS_TX_RQ_START_ADDR_EN BIT(2)
+#define BITS_TX_RQ_DEPTH_EN BIT(1)
+#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
+#define TX_RQ_ALFULL_TH 0x05bc
+#define RAW_PMU_INT 0x05c0
+#define ENA_PMU_INT 0x05c4
+#define STATUS_PMU_INT 0x05c8
+#define MAC_FIFO_ERR_IN BIT(30)
+#define TX_RQ_IN_TIMEOUT_INT BIT(29)
+#define RX_BQ_IN_TIMEOUT_INT BIT(28)
+#define TXOUTCFF_FULL_INT BIT(27)
+#define TXOUTCFF_EMPTY_INT BIT(26)
+#define TXCFF_FULL_INT BIT(25)
+#define TXCFF_EMPTY_INT BIT(24)
+#define RXOUTCFF_FULL_INT BIT(23)
+#define RXOUTCFF_EMPTY_INT BIT(22)
+#define RXCFF_FULL_INT BIT(21)
+#define RXCFF_EMPTY_INT BIT(20)
+#define TX_RQ_IN_INT BIT(19)
+#define TX_BQ_OUT_INT BIT(18)
+#define RX_BQ_IN_INT BIT(17)
+#define RX_FQ_OUT_INT BIT(16)
+#define TX_RQ_EMPTY_INT BIT(15)
+#define TX_RQ_FULL_INT BIT(14)
+#define TX_RQ_ALEMPTY_INT BIT(13)
+#define TX_RQ_ALFULL_INT BIT(12)
+#define TX_BQ_EMPTY_INT BIT(11)
+#define TX_BQ_FULL_INT BIT(10)
+#define TX_BQ_ALEMPTY_INT BIT(9)
+#define TX_BQ_ALFULL_INT BIT(8)
+#define RX_BQ_EMPTY_INT BIT(7)
+#define RX_BQ_FULL_INT BIT(6)
+#define RX_BQ_ALEMPTY_INT BIT(5)
+#define RX_BQ_ALFULL_INT BIT(4)
+#define RX_FQ_EMPTY_INT BIT(3)
+#define RX_FQ_FULL_INT BIT(2)
+#define RX_FQ_ALEMPTY_INT BIT(1)
+#define RX_FQ_ALFULL_INT BIT(0)
+
+#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
+ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
+
+#define DESC_WR_RD_ENA 0x05cc
+#define IN_QUEUE_TH 0x05d8
+#define OUT_QUEUE_TH 0x05dc
+#define QUEUE_TX_BQ_SHIFT 16
+#define RX_BQ_IN_TIMEOUT_TH 0x05e0
+#define TX_RQ_IN_TIMEOUT_TH 0x05e4
+#define STOP_CMD 0x05e8
+#define BITS_TX_STOP BIT(1)
+#define BITS_RX_STOP BIT(0)
+#define FLUSH_CMD			0x05ec
+#define BITS_TX_FLUSH_CMD BIT(5)
+#define BITS_RX_FLUSH_CMD BIT(4)
+#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
+#define BITS_TX_FLUSH_FLAG_UP BIT(2)
+#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
+#define BITS_RX_FLUSH_FLAG_UP BIT(0)
+#define RX_CFF_NUM_REG 0x05f0
+#define PMU_FSM_REG 0x05f8
+#define RX_FIFO_PKT_IN_NUM 0x05fc
+#define RX_FIFO_PKT_OUT_NUM 0x0600
+
+#define RGMII_SPEED_1000 0x2c
+#define RGMII_SPEED_100 0x2f
+#define RGMII_SPEED_10 0x2d
+#define MII_SPEED_100 0x0f
+#define MII_SPEED_10 0x0d
+#define GMAC_SPEED_1000 0x05
+#define GMAC_SPEED_100 0x01
+#define GMAC_SPEED_10 0x00
+#define GMAC_FULL_DUPLEX BIT(4)
+
+#define RX_BQ_INT_THRESHOLD 0x01
+#define TX_RQ_INT_THRESHOLD 0x01
+#define RX_BQ_IN_TIMEOUT 0x10000
+#define TX_RQ_IN_TIMEOUT 0x50000
+
+#define MAC_MAX_FRAME_SIZE 1600
+#define DESC_SIZE 32
+#define RX_DESC_NUM 1024
+#define TX_DESC_NUM 1024
+
+#define DESC_VLD_FREE 0
+#define DESC_VLD_BUSY 0x80000000
+#define DESC_FL_MID 0
+#define DESC_FL_LAST 0x20000000
+#define DESC_FL_FIRST 0x40000000
+#define DESC_FL_FULL 0x60000000
+#define DESC_DATA_LEN_OFF 16
+#define DESC_BUFF_LEN_OFF 0
+#define DESC_DATA_MASK 0x7ff
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_cnt(n) ((n) >> 5)
+#define dma_byte(n) ((n) << 5)
+
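
Each hix5hd2 descriptor is 32 bytes, so the helpers above convert between the hardware's byte-granular queue pointers and software descriptor indices with shifts by 5, and dma_ring_incr() wraps with a mask, which requires a power-of-two ring size. A standalone worked example:

#include <stdio.h>

#define DESC_NUM	1024			/* must be a power of two */
#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
#define dma_cnt(n)	((n) >> 5)		/* bytes -> descriptors */
#define dma_byte(n)	((n) << 5)		/* descriptors -> bytes */

int main(void)
{
	unsigned int pos = dma_cnt(0x140);	/* 0x140 / 32 = 10 */

	pos = dma_ring_incr(pos, DESC_NUM);	/* 11 */
	printf("next write: byte offset 0x%x\n", dma_byte(pos)); /* 0x160 */
	return 0;
}
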
+struct hix5hd2_desc {
+ __le32 buff_addr;
+ __le32 cmd;
+} __aligned(32);
+
+struct hix5hd2_desc_sw {
+ struct hix5hd2_desc *desc;
+ dma_addr_t phys_addr;
+ unsigned int count;
+ unsigned int size;
+};
+
+#define QUEUE_NUMS 4
+struct hix5hd2_priv {
+ struct hix5hd2_desc_sw pool[QUEUE_NUMS];
+#define rx_fq pool[0]
+#define rx_bq pool[1]
+#define tx_bq pool[2]
+#define tx_rq pool[3]
+
+ void __iomem *base;
+ void __iomem *ctrl_base;
+
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ struct sk_buff *rx_skb[RX_DESC_NUM];
+
+ struct device *dev;
+ struct net_device *netdev;
+
+ struct phy_device *phy;
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct clk *clk;
+ struct mii_bus *bus;
+ struct napi_struct napi;
+ struct work_struct tx_timeout_task;
+};
+
+static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ if (speed == SPEED_1000)
+ val = RGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = RGMII_SPEED_100;
+ else
+ val = RGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+		netdev_warn(dev, "unsupported PHY interface mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+
+ if (duplex)
+ val |= GMAC_FULL_DUPLEX;
+ writel_relaxed(val, priv->ctrl_base);
+
+ writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
+ if (speed == SPEED_1000)
+ val = GMAC_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = GMAC_SPEED_100;
+ else
+ val = GMAC_SPEED_10;
+ writel_relaxed(val, priv->base + PORT_MODE);
+ writel_relaxed(0, priv->base + MODE_CHANGE_EN);
+ writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
+}
+
+static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
+{
+ writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+
+ writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
+{
+ hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
+ hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
+ hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
+ hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
+}
+
+static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
+{
+ u32 val;
+
+ /* disable and clear all interrupts */
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+ writel_relaxed(~0, priv->base + RAW_PMU_INT);
+
+ writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
+ writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
+ writel_relaxed(0, priv->base + COL_SLOT_TIME);
+
+ val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
+ writel_relaxed(val, priv->base + IN_QUEUE_TH);
+
+ writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
+ writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
+
+ hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
+ hix5hd2_set_desc_addr(priv);
+}
+
+static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
+ writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
+}
+
+static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
+}
+
+static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned char *mac = dev->dev_addr;
+ u32 val;
+
+ val = mac[1] | (mac[0] << 8);
+ writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
+
+ val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel_relaxed(val, priv->base + STATION_ADDR_LOW);
+}
+
+static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (!ret)
+ hix5hd2_hw_set_mac_addr(dev);
+
+ return ret;
+}
+
+static void hix5hd2_adjust_link(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hix5hd2_config_port(dev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ struct sk_buff *skb;
+ u32 start, end, num, pos, i;
+ u32 len = MAC_MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ /* software write pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
+ /* logic read pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
+ num = CIRC_SPACE(start, end, RX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ if (priv->rx_skb[pos]) {
+ break;
+ } else {
+ skb = netdev_alloc_skb_ip_align(priv->netdev, len);
+ if (unlikely(skb == NULL))
+ break;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ desc = priv->rx_fq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->rx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_FREE |
+ (len - 1) << DESC_BUFF_LEN_OFF);
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ /* ensure desc updated */
+ wmb();
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
+}
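
hix5hd2_rx_refill() above is the producer side of a classic circular-buffer protocol: CIRC_SPACE() bounds how many free-queue slots may be filled between the software write pointer and the hardware read pointer, and the new write pointer is published only after a wmb(). The skeleton, reduced to its moving parts (alloc_buf() and publish_wr_ptr() are illustrative placeholders, not driver symbols):

#include <linux/circ_buf.h>
#include <asm/barrier.h>

extern int alloc_buf(unsigned int slot);	/* fill one free-queue slot */
extern void publish_wr_ptr(unsigned int wr);	/* hand slots to hardware */

static void demo_refill(unsigned int wr, unsigned int rd,
			unsigned int ring_size)
{
	unsigned int n = CIRC_SPACE(wr, rd, ring_size);

	while (n--) {
		if (alloc_buf(wr))
			break;
		wr = (wr + 1) & (ring_size - 1);
	}

	wmb();			/* descriptors visible before the pointer */
	publish_wr_ptr(wr);
}
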
+
+static int hix5hd2_rx(struct net_device *dev, int limit)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 start, end, num, pos, i, len;
+
+ /* software read pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
+ /* logic write pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
+ num = CIRC_CNT(end, start, RX_DESC_NUM);
+ if (num > limit)
+ num = limit;
+
+ /* ensure get updated desc */
+ rmb();
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->rx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent rx_skb\n");
+ break;
+ }
+ priv->rx_skb[pos] = NULL;
+
+ desc = priv->rx_bq.desc + pos;
+ len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
+ DESC_DATA_MASK;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ if (skb->len > MAC_MAX_FRAME_SIZE) {
+			netdev_err(dev, "rx length error, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ dev->last_rx = jiffies;
+next:
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
+
+ hix5hd2_rx_refill(priv);
+
+ return num;
+}
+
+static void hix5hd2_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 start, end, num, pos, i;
+ dma_addr_t addr;
+
+ netif_tx_lock(dev);
+
+ /* software read */
+ start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
+ /* logic write */
+ end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
+ num = CIRC_CNT(end, start, TX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->tx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent tx_skb\n");
+ break;
+ }
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+ desc = priv->tx_rq.desc + pos;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ priv->tx_skb[pos] = NULL;
+ dev_consume_skb_any(skb);
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
+
+ netif_tx_unlock(dev);
+
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
+ netif_wake_queue(priv->netdev);
+}
+
+static int hix5hd2_poll(struct napi_struct *napi, int budget)
+{
+ struct hix5hd2_priv *priv = container_of(napi,
+ struct hix5hd2_priv, napi);
+ struct net_device *dev = priv->netdev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hix5hd2_xmit_reclaim(dev);
+ num = hix5hd2_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if ((work_done >= budget) || (num == 0))
+ break;
+
+ ints = readl_relaxed(priv->base + RAW_PMU_INT);
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ hix5hd2_irq_enable(priv);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ints = readl_relaxed(priv->base + RAW_PMU_INT);
+
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ if (likely(ints & DEF_INT_MASK)) {
+ hix5hd2_irq_disable(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 pos;
+
+ /* software write pointer */
+ pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
+ if (unlikely(priv->tx_skb[pos])) {
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ desc = priv->tx_bq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->tx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
+ (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
+ (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+
+ /* ensure desc updated */
+ wmb();
+
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
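
The TX path above packs everything the DMA engine needs into one 32-bit command word: a valid bit, first/last-fragment flags, and duplicated data/buffer length fields. A standalone sketch of the packing, with the constants copied from this driver:

#include <stdint.h>

#define DESC_VLD_BUSY		0x80000000u
#define DESC_FL_FULL		0x60000000u	/* first and last fragment */
#define DESC_DATA_LEN_OFF	16
#define DESC_BUFF_LEN_OFF	0
#define DESC_DATA_MASK		0x7ffu

static uint32_t demo_tx_cmd(uint32_t len)
{
	return DESC_VLD_BUSY | DESC_FL_FULL |
	       (len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
	       (len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF;
}
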
+
+static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->rx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->rx_fq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr,
+ MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ }
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->tx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->tx_rq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ }
+}
+
+static int hix5hd2_net_open(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable clk %d\n", ret);
+ return ret;
+ }
+
+ priv->phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+	if (!priv->phy) {
+		clk_disable_unprepare(priv->clk);
+		return -ENODEV;
+	}
+
+ phy_start(priv->phy);
+ hix5hd2_hw_init(priv);
+ hix5hd2_rx_refill(priv);
+
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ hix5hd2_port_enable(priv);
+ hix5hd2_irq_enable(priv);
+
+ return 0;
+}
+
+static int hix5hd2_net_close(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ hix5hd2_port_disable(priv);
+ hix5hd2_irq_disable(priv);
+ napi_disable(&priv->napi);
+ netif_stop_queue(dev);
+ hix5hd2_free_dma_desc_rings(priv);
+
+ if (priv->phy) {
+ phy_stop(priv->phy);
+ phy_disconnect(priv->phy);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void hix5hd2_tx_timeout_task(struct work_struct *work)
+{
+ struct hix5hd2_priv *priv;
+
+ priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
+ hix5hd2_net_close(priv->netdev);
+ hix5hd2_net_open(priv->netdev);
+}
+
+static void hix5hd2_net_timeout(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static const struct net_device_ops hix5hd2_netdev_ops = {
+ .ndo_open = hix5hd2_net_open,
+ .ndo_stop = hix5hd2_net_close,
+ .ndo_start_xmit = hix5hd2_net_xmit,
+ .ndo_tx_timeout = hix5hd2_net_timeout,
+ .ndo_set_mac_address = hix5hd2_net_set_mac_address,
+};
+
+static int hix5hd2_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hix5hd2_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phy, cmd);
+}
+
+static int hix5hd2_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hix5hd2_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phy, cmd);
+}
+
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = hix5hd2_get_settings,
+ .set_settings = hix5hd2_set_settings,
+};
+
+static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int i, timeout = 10000;
+
+ for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
+ if (i == timeout)
+ return -ETIMEDOUT;
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
+static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int val, ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(base + MDIO_RDATA_STATUS);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
+ ret = (val >> 16) & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(val, base + MDIO_SINGLE_DATA);
+ writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+out:
+ return ret;
+}
+
+static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ if (priv->pool[i].desc) {
+ dma_free_coherent(priv->dev, priv->pool[i].size,
+ priv->pool[i].desc,
+ priv->pool[i].phys_addr);
+ priv->pool[i].desc = NULL;
+ }
+ }
+}
+
+static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct hix5hd2_desc *virt_addr;
+ dma_addr_t phys_addr;
+ int size, i;
+
+ priv->rx_fq.count = RX_DESC_NUM;
+ priv->rx_bq.count = RX_DESC_NUM;
+ priv->tx_bq.count = TX_DESC_NUM;
+ priv->tx_rq.count = TX_DESC_NUM;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
+ if (virt_addr == NULL)
+ goto error_free_pool;
+
+ memset(virt_addr, 0, size);
+ priv->pool[i].size = size;
+ priv->pool[i].desc = virt_addr;
+ priv->pool[i].phys_addr = phys_addr;
+ }
+ return 0;
+
+error_free_pool:
+ hix5hd2_destroy_hw_desc_queue(priv);
+
+ return -ENOMEM;
+}
+
+static int hix5hd2_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct net_device *ndev;
+ struct hix5hd2_priv *priv;
+ struct resource *res;
+ struct mii_bus *bus;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_free_netdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ctrl_base)) {
+ ret = PTR_ERR(priv->ctrl_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ netdev_err(ndev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ bus = mdiobus_alloc();
+ if (bus == NULL) {
+ ret = -ENOMEM;
+ goto out_free_netdev;
+ }
+
+ bus->priv = priv;
+ bus->name = "hix5hd2_mii_bus";
+ bus->read = hix5hd2_mdio_read;
+ bus->write = hix5hd2_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ priv->bus = bus;
+
+ ret = of_mdiobus_register(bus, node);
+ if (ret)
+ goto err_free_mdio;
+
+ priv->phy_mode = of_get_phy_mode(node);
+ if (priv->phy_mode < 0) {
+		netdev_err(ndev, "failed to find phy-mode\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (!priv->phy_node) {
+		netdev_err(ndev, "failed to find phy-handle\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = -EINVAL;
+ goto out_phy_node;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto out_phy_node;
+ }
+
+ mac_addr = of_get_mac_address(node);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ netdev_warn(ndev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hix5hd2_netdev_ops;
+ ndev->ethtool_ops = &hix5hd2_ethtools_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ ret = hix5hd2_init_hw_desc_queue(priv);
+ if (ret)
+ goto out_phy_node;
+
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ ret = register_netdev(priv->netdev);
+ if (ret) {
+		netdev_err(ndev, "register_netdev failed\n");
+ goto out_destroy_queue;
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+
+out_destroy_queue:
+ netif_napi_del(&priv->napi);
+ hix5hd2_destroy_hw_desc_queue(priv);
+out_phy_node:
+ of_node_put(priv->phy_node);
+err_mdiobus:
+ mdiobus_unregister(bus);
+err_free_mdio:
+ mdiobus_free(bus);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hix5hd2_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hix5hd2_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+ mdiobus_unregister(priv->bus);
+ mdiobus_free(priv->bus);
+
+ hix5hd2_destroy_hw_desc_queue(priv);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hix5hd2_of_match[] = {
+ {.compatible = "hisilicon,hix5hd2-gmac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
+
+static struct platform_driver hix5hd2_dev_driver = {
+ .driver = {
+ .name = "hix5hd2-gmac",
+ .of_match_table = hix5hd2_of_match,
+ },
+ .probe = hix5hd2_dev_probe,
+ .remove = hix5hd2_dev_remove,
+};
+
+module_platform_driver(hix5hd2_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hix5hd2-gmac");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 95837b99a464..85a3866459cf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -63,8 +63,8 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- speed = ~0;
- cmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ cmd->duplex = DUPLEX_UNKNOWN;
}
ethtool_cmd_speed_set(cmd, speed);
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
void ehea_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+ netdev->ethtool_ops = &ehea_ethtool_ops;
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 538903bf13bc..a0b418e007a0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
return -EINVAL;
}
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
ret = -ENOMEM;
dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
out_free_ad:
list_del(&adapter->list);
- kfree(adapter);
out:
ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
list_del(&adapter->list);
- kfree(adapter);
ehea_update_firmware_handles();
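
Switching the ehea adapter allocation to devm_kzalloc() ties the buffer's lifetime to the platform device, which is why both kfree() calls can simply disappear from the error and remove paths above. The pattern in isolation (demo_adapter is illustrative):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_adapter {
	int dummy;
};

/* Sketch: a device-managed allocation needs no matching kfree();
 * devres frees it on probe failure and on driver detach.
 */
static int demo_probe(struct platform_device *pdev)
{
	struct demo_adapter *adapter;

	adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	platform_set_drvdata(pdev, adapter);
	return 0;
}
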
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 9b03033bb557..a0820f72b25c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -103,12 +103,14 @@ out_nomem:
static void hw_queue_dtor(struct hw_queue *queue)
{
- int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+ int pages_per_kpage;
int i, nr_pages;
if (!queue || !queue->queue_pages)
return;
+ pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
nr_pages = queue->queue_length / queue->pagesize;
for (i = 0; i < nr_pages; i += pages_per_kpage)
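
The hw_queue_dtor() hunk above is a subtle fix: the old initializer divided by queue->pagesize before the NULL check ran, dereferencing a possibly NULL pointer. Moving the derived computation after the guard is the entire change, sketched generically below (demo_queue mirrors the shape of ehea's struct hw_queue):

#define KPAGE_SIZE 4096

struct demo_queue {
	void **queue_pages;
	int pagesize;
};

static void demo_dtor(struct demo_queue *queue)
{
	int pages_per_kpage;

	if (!queue || !queue->queue_pages)
		return;				/* guard first */

	pages_per_kpage = KPAGE_SIZE / queue->pagesize;	/* now safe */
	/* ... free queue pages, pages_per_kpage at a time ... */
	(void)pages_per_kpage;
}
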
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ae342fdb42c8..87bd953cc2ee 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
dev->commac.ops = &emac_commac_sg_ops;
} else
ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+ ndev->ethtool_ops = &emac_ethtool_ops;
netif_carrier_off(ndev);
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 25045ae07171..5727779a7df2 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
*/
dev->netdev_ops = &ipg_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
- SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+ dev->ethtool_ops = &ipg_ethtool_ops;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index b56461ce674c..9d979d7debef 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXALL;
netdev->netdev_ops = &e100_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+ netdev->ethtool_ops = &e100_ethtool_ops;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aeefb92a..d50f78afb56d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
- } while (good_cnt < 64 && jiffies < (time + 20));
+ } while (good_cnt < 64 && time_after(time + 20, jiffies));
+
if (good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
break;
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
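
The loopback-test hunk above replaces a raw "jiffies < time + 20" comparison with time_after(), which stays correct when the jiffies counter wraps. A minimal usage sketch:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Wrap-safe timeout sketch: never compare jiffies values with < or >. */
static bool demo_before_deadline(unsigned long deadline)
{
	return time_after(deadline, jiffies);
}

/* Typical use: deadline = jiffies + HZ / 50;
 * while (!done && demo_before_deadline(deadline)) ...
 */
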
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c1d3fdb296a0..e9b07ccc0eba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
- if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ if (is_broadcast_ether_addr(mac_addr))
/* Broadcast packet */
stats->bprc++;
- else if (*mac_addr & 0x01)
+ else if (is_multicast_ether_addr(mac_addr))
/* Multicast packet */
stats->mprc++;
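
The e1000_tbi_adjust_stats() hunk above swaps open-coded byte tests for the etherdevice.h helpers. Order still matters: a broadcast address is also multicast, so the broadcast test must come first. Sketch:

#include <linux/etherdevice.h>

/* Classification sketch mirroring the statistics update above. */
static void demo_classify(const u8 *mac, u64 *bprc, u64 *mprc)
{
	if (is_broadcast_ether_addr(mac))
		(*bprc)++;		/* broadcast before multicast */
	else if (is_multicast_ether_addr(mac))
		(*mprc)++;
}
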
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 27058dfe418b..660971f304b2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3105,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
*/
tx_ring = adapter->tx_ring;
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a5f6b11d6992..08f22f348800 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
.setup_led = e1000e_setup_led_generic,
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index e0aa7f1efb08..218481e509f9 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
.config_collision_dist = e1000e_config_collision_dist_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..7785240a0da1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
u32 tx_hwtstamp_timeouts;
/* Rx */
- bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
- int work_to_do) ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
- gfp_t gfp);
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
*/
#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time. Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value. Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
- s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
- while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
-
- return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
- if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- __ew32_prepare(hw);
-
- writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..815e26c6d34b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -159,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
+
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
reg + (offset << 2), val,
(test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 val;
+
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 6b3de5f39a97..72f5475c4b90 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*config_collision_dist)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8 *, u32);
+ int (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
+ u32 (*rar_get_count)(struct e1000_hw *);
};
/* When to use various PHY register access functions:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..8894ab8ed6bd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -704,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
e1000_setup_copper_link_pch_lpt;
+ mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
}
/* Enable PCS Lock-loss workaround for ICH8 */
@@ -1334,6 +1336,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
+
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
u16 emi_addr;
@@ -1634,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1667,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
* contain the MAC address but RAR[1-6] are reserved for manageability (ME).
* Use SHRA[0-3] in place of those reserved for ME.
**/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -1689,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* RAR[1-6] are owned by manageability. Skip those and program the
@@ -1712,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL(index - 1)) == rar_low) &&
(er32(SHRAH(index - 1)) == rar_high))
- return;
+ return 0;
e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
(index - 1), er32(FWSM));
@@ -1720,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ * @hw: pointer to the HW structure
+ *
+ * Get the number of available receive registers that the Host can
+ * program. SHRA[0-10] are the shared receive address registers
+ * that are shared between the Host and manageability engine (ME).
+ * ME can reserve any number of addresses, so the host needs to be
+ * able to tell how many registers it still has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+ u32 wlock_mac;
+ u32 num_entries;
+
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ switch (wlock_mac) {
+ case 0:
+ /* All SHRA[0..10] and RAR[0] available */
+ num_entries = hw->mac.rar_entry_count;
+ break;
+ case 1:
+ /* Only RAR[0] available */
+ num_entries = 1;
+ break;
+ default:
+ /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+ num_entries = wlock_mac + 1;
+ break;
+ }
+
+ return num_entries;
}
/**
@@ -1733,7 +1773,7 @@ out:
* contain the MAC address. SHRA[0-10] are the shared receive address
* registers that are shared between the Host and manageability engine (ME).
**/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
u32 wlock_mac;
@@ -1755,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1787,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
(er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
- return;
+ return 0;
}
}
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -4976,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations ich8_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index baa0a466d1d0..8c386f3a15eb 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
return 0;
}
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+ return hw->mac.rar_entry_count;
+}
+
/**
* e1000e_rar_set_generic - Set receive address register
* @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 4e81c2825b7a..0513d90cdeea 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..201cc93f3625 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -124,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
};
/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time. Normally, this is handled in
+ * h/w by an arbiter but on some parts there is a bug that acknowledges Host
+ * accesses later than it should, which could result in the register having
+ * an incorrect value. Work around this by checking the FWSM register, which
+ * has bit 24 set while ME is accessing MAC CSR registers; wait if it is set
+ * and try again a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+ s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+ while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+ udelay(50);
+
+ return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ __ew32_prepare(hw);
+
+ writel(val, hw->hw_addr + reg);
+}
+
+/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
* @reginfo: pointer to the register info table
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
+
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+
rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
+
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
int i;
+
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
+
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
/* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
+
ew32(RXDCTL(0), rxdctl | 0x3);
}
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.rar_entry_count;
+ unsigned int rar_entries;
int count = 0;
+ rar_entries = hw->mac.ops.rar_get_count(hw);
+
/* save a rar entry for our hardware address */
rar_entries--;
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
+ int rval;
+
if (!rar_entries)
break;
- hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (rval < 0)
+ return -ENOMEM;
count++;
}
}
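The rewritten loop above fills receive-address slots from the top down, keeps slot 0 for the device's own MAC, and turns a rar_set() failure into -ENOMEM. A standalone sketch of that fill order, with fake addresses:

/* Fill-order sketch; the entry count and addresses are made up. */
#include <stdio.h>

int main(void)
{
	int rar_entries = 4;   /* hypothetical rar_get_count() result */
	const char *uc[] = { "addr-A", "addr-B", "addr-C", "addr-D" };
	int count = 0;

	rar_entries--;         /* reserve RAR[0] for our own hardware address */
	for (int i = 0; i < 4 && rar_entries; i++) {
		printf("program %s at RAR[%d]\n", uc[i], rar_entries--);
		count++;       /* a rar_set() failure would return -ENOMEM */
	}
	printf("programmed %d of 4\n", count);
	return 0;
}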
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- cycle_t systim;
+ cycle_t systim, systim_next;
/* latch SYSTIMH on read of SYSTIML */
systim = (cycle_t)er32(SYSTIML);
systim |= (cycle_t)er32(SYSTIMH) << 32;
+ if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
+ u64 incvalue, time_delta, rem, temp;
+ int i;
+
+ /* errata for 82574/82583: possible bad bits read from SYSTIMH/L;
+ * check that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue
+ */
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ rem = do_div(temp, incvalue);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
+ (rem == 0))
+ break;
+ }
+ }
return systim;
}
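In the kernel, do_div(n, base) divides the 64-bit n in place (n becomes the quotient) and returns the remainder; the loop above accepts a reading only when the delta since the previous read is under the 2^35 epsilon and divides evenly by incvalue. A userspace equivalent using plain / and %, with faked register reads:

/* Accept/retry loop equivalent; incvalue and the readings are made up. */
#include <stdint.h>
#include <stdio.h>

#define EPSILON  (1ULL << 35)
#define REREADS  3

int main(void)
{
	uint64_t incvalue = 24;  /* hypothetical TIMINCA increment */
	uint64_t reads[] = { 1000, 5000000, 5000024, 5000048 }; /* fake SYSTIM */
	uint64_t systim = reads[0];

	for (int i = 1; i <= REREADS; i++) {
		uint64_t next = reads[i];
		uint64_t delta = next - systim;

		systim = next;
		/* do_div(delta, incvalue) == (delta % incvalue) here */
		if (delta < EPSILON && (delta % incvalue) == 0)
			break;   /* plausible reading, keep it */
	}
	printf("accepted systim=%llu\n", (unsigned long long)systim);
	return 0;
}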
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
e1000_get_phy_info(hw);
/* Enable EEE on 82579 after link up */
- if (hw->phy.type == e1000_phy_82579)
+ if (hw->phy.type >= e1000_phy_82579)
e1000_set_eee_pchlan(hw);
}
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
+
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
(adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl | E1000_RCTL_EN);
adapter->flags &= ~FLAG_RESTART_NOW;
}
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
!txb2b) {
u32 tarc0;
+
tarc0 = er32(TARC(0));
tarc0 &= ~SPEED_MODE_BIT;
ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
__be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ return false;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return true;
}
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
+
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
+
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
+
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
+
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
+
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
(hw->phy.addr == 2) &&
!(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
u16 data2 = 0x7EFF;
+
ret_val = e1000_access_phy_debug_regs_hv(hw,
(1 << 6) | 0x3,
&data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..65985846345d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -97,10 +98,6 @@
#define STRINGIFY(foo) #foo
#define XSTRINGIFY(bar) STRINGIFY(bar)
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
#define I40E_RX_DESC(R, i) \
((ring_is_16byte_desc_enabled(R)) \
? (union i40e_32byte_rx_desc *) \
@@ -157,11 +154,23 @@ struct i40e_lump_tracking {
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 200
+enum i40e_fd_stat_idx {
+ I40E_FD_STAT_ATR,
+ I40E_FD_STAT_SB,
+ I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
u8 ip4_proto;
+ /* TX packet view of src and dst */
__be32 dst_ip[4];
__be32 src_ip[4];
__be16 src_port;
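A quick runnable check of the per-PF stat index arithmetic defined in the new macros above, with the counters laid out as back-to-back (ATR, SB) pairs:

/* Index arithmetic check; mirrors the macro layout above. */
#include <stdio.h>

enum { FD_STAT_ATR, FD_STAT_SB, FD_STAT_PF_COUNT };
#define FD_STAT_PF_IDX(pf)  ((pf) * FD_STAT_PF_COUNT)
#define FD_ATR_IDX(pf)      (FD_STAT_PF_IDX(pf) + FD_STAT_ATR)
#define FD_SB_IDX(pf)       (FD_STAT_PF_IDX(pf) + FD_STAT_SB)

int main(void)
{
	for (int pf = 0; pf < 3; pf++)
		printf("pf %d: ATR idx %d, SB idx %d\n",
		       pf, FD_ATR_IDX(pf), FD_SB_IDX(pf));
	return 0;   /* pf 0: 0,1   pf 1: 2,3   pf 2: 4,5 */
}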
@@ -205,7 +214,6 @@ struct i40e_pf {
unsigned long state;
unsigned long link_check_timeout;
struct msix_entry *msix_entries;
- u16 num_msix_entries;
bool fc_autoneg_status;
u16 eeprom_version;
@@ -220,11 +228,14 @@ struct i40e_pf {
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
+ u16 fd_sb_cnt_idx;
+ u16 fd_atr_cnt_idx;
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -266,6 +277,7 @@ struct i40e_pf {
#ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
+#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -300,7 +312,6 @@ struct i40e_pf {
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct i40e_aqc_get_switch_config_data *sw_config;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
@@ -329,9 +340,7 @@ struct i40e_pf {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
- struct work_struct ptp_tx_work;
struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj;
@@ -420,6 +429,7 @@ struct i40e_vsi {
struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
+ bool irqs_ready;
u16 seid; /* HW index of this VSI (absolute index) */
u16 id; /* VSI number */
@@ -540,6 +550,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
}
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the pf struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..7a027499fc57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -281,8 +291,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -291,6 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
@@ -299,7 +313,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
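Both queue-config helpers now read back one of the registers they just wrote and fail with I40E_ERR_ADMIN_QUEUE_ERROR on a mismatch, catching a dead write path early. The generic write-then-verify shape, standalone, with a plain array standing in for the register file and hypothetical addresses:

/* Write-then-verify sketch; register numbers and values are made up. */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];

static void     wr32(unsigned r, uint32_t v) { regs[r] = v; }
static uint32_t rd32(unsigned r)             { return regs[r]; }

static int config_queue_base(unsigned bal_reg, uint64_t dma_addr)
{
	wr32(bal_reg, (uint32_t)dma_addr);   /* low half of the base address */

	/* verify one register to confirm the config actually landed */
	if (rd32(bal_reg) != (uint32_t)dma_addr)
		return -1;                   /* ADMIN_QUEUE_ERROR in the driver */
	return 0;
}

int main(void)
{
	printf("rc=%d\n", config_queue_base(2, 0x12345678abcd0000ULL));
	return 0;
}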
/**
@@ -308,8 +329,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -318,6 +342,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
@@ -326,10 +351,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
@@ -377,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -434,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -577,14 +613,14 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -835,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -929,6 +974,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
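Taken together, the nvm_busy hunks form a simple gate: a second NVM erase/update is rejected while one is outstanding, the flag is set when the op is posted, and it is cleared when the completion event is cleaned (and also reset at adminq init). A standalone model of the gate:

/* Busy-flag gate model; not the driver's code. */
#include <stdbool.h>
#include <stdio.h>

static bool nvm_busy;

static int send_nvm_op(void)
{
	if (nvm_busy)
		return -1;      /* I40E_ERR_NVM in the driver */
	nvm_busy = true;        /* set when the op is posted */
	return 0;
}

static void complete_nvm_op(void)
{
	nvm_busy = false;       /* cleared when the event is cleaned */
}

int main(void)
{
	printf("first: %d\n", send_nvm_op());    /* 0  */
	printf("second: %d\n", send_nvm_op());   /* -1 */
	complete_nvm_op();
	printf("third: %d\n", send_nvm_op());    /* 0  */
	return 0;
}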
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..15f289f2917f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -182,9 +183,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +205,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -224,13 +223,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -272,8 +273,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -341,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -1289,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1539,6 +1535,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1547,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1583,11 +1584,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1694,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1747,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1785,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..6e65f19dd6e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_SFP_X710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_KX_D:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
@@ -133,7 +131,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
}
/**
@@ -653,6 +655,36 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles the hw requirement to indicate the intention to enable
+ * or disable the target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128)
+ reg_block = abs_queue_idx / 128;
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
+
+/**
* i40e_get_media_type - Gets media type
* @hw: pointer to the hardware structure
**/
@@ -699,7 +731,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 10
+#define I40E_PF_RESET_WAIT_COUNT 100
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -789,6 +821,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
u32 reg;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
@@ -907,6 +942,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
/* Admin command wrappers */
/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
* i40e_aq_set_link_restart_an
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
@@ -975,6 +1037,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -1021,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -1163,8 +1230,6 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -1203,8 +1268,6 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -1300,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
i40e_status status;
+ u16 len;
if (dv == NULL)
return I40E_ERR_PARAM;
@@ -1311,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
cmd->driver_subbuild_ver = dv->subbuild_version;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
return status;
}
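The new length scan sends only the leading run of non-NUL, 7-bit characters of driver_string as the command buffer. A userspace equivalent of that scan:

/* Bounded ASCII-prefix length; buffer and contents are made up. */
#include <stdio.h>

static unsigned ascii_prefix_len(const char *s, unsigned max)
{
	unsigned len = 0;

	while (len < max && (unsigned char)s[len] < 0x80 && s[len])
		len++;
	return len;
}

int main(void)
{
	char buf[32] = "i40e-1.2.3";

	printf("%u\n", ascii_prefix_len(buf, sizeof(buf)));  /* 10 */
	return 0;
}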
@@ -1900,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
}
+ /* Software override ensuring FCoE is disabled in npar or mfp
+ * mode, because it is not supported in those modes.
+ */
+ if (p->npar_enable || p->mfp_mode_1)
+ p->fcoe = false;
+
/* additional HW specific goodies that might
* someday be HW version specific
*/
@@ -2094,8 +2171,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2253,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
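A hypothetical call site for the new wrapper (not part of this patch); per the kdoc, credit == 0 disables the limit, and hw/vsi_seid are assumed to exist in the caller:

/* Illustrative usage only. */
i40e_status status;

status = i40e_aq_config_vsi_bw_limit(hw, vsi_seid, 200, 0, NULL); /* cap */
if (!status)
	status = i40e_aq_config_vsi_bw_limit(hw, vsi_seid, 0, 0, NULL); /* uncap */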
+
+/**
* i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
* @seid: VSI seid
@@ -2405,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 pe_cntx_size, pe_filt_size;
- u32 fcoe_fmax, pe_fmax;
+ u32 fcoe_fmax;
u32 val;
/* Validate FCoE settings passed */
@@ -2480,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return I40E_ERR_INVALID_SIZE;
- /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
- val = rd32(hw, I40E_GLHMC_PEXFMAX);
- pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
- >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
- if (pe_filt_size + pe_cntx_size > pe_fmax)
- return I40E_ERR_INVALID_SIZE;
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103abfd0d..00bc0cdb3a03 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_ieee_app_priority_table *app)
{
int v, err;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
if (err)
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
struct net_device *dev = vsi->netdev;
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
- /* DCB not enabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ /* Not DCB capable */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;
/* Do not setup DCB NL ops for MFP mode */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..cffdfc21290f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
if (seid < 0)
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
else
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
return pf->vsi[i];
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
i, pf->vsi[i]->seid);
@@ -862,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
" rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
dev_info(&pf->pdev->dev,
- " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
- estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+ estats->rx_broadcast, estats->rx_discards);
dev_info(&pf->pdev->dev,
- " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
- estats->rx_missed, estats->rx_unknown_protocol,
- estats->tx_bytes);
+ " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_unknown_protocol, estats->tx_bytes);
dev_info(&pf->pdev->dev,
" tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -1527,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
@@ -1744,10 +1743,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
- } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1967,8 +1962,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd-atr off\n");
dev_info(&pf->pdev->dev, " fd-atr on\n");
- dev_info(&pf->pdev->dev, " fd-sb off\n");
- dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index b2380daef8c1..56438bd579e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -67,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
- {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
- {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
- {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
- {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1,
+ I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+ I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+ I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1,
+ I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1,
+ I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -93,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
u32 reg, mask;
u32 i, j;
- for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ !ret_code; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
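The table now defaults each dynamically sized range to a single element, and the test loop widens it at runtime from func_caps. A standalone sketch of that sizing step, with made-up offsets and capability values:

/* Runtime range sizing sketch; offsets and counts are hypothetical. */
#include <stdio.h>

struct reg_test { unsigned offset, elements, stride; };

int main(void)
{
	struct reg_test qtx = { 0x104000, 1, 4 };   /* table default: 1 element */
	unsigned num_tx_qp = 64;                    /* from func_caps */

	if (num_tx_qp != 0)
		qtx.elements = num_tx_qp;           /* widen to the real range */

	printf("testing %u regs from 0x%x, stride %u\n",
	       qtx.elements, qtx.offset, qtx.stride);
	return 0;
}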
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..4a488ffcd6b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -46,6 +46,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_veb, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -56,12 +58,36 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(tx_errors),
I40E_NETDEV_STAT(rx_dropped),
I40E_NETDEV_STAT(tx_dropped),
- I40E_NETDEV_STAT(multicast),
I40E_NETDEV_STAT(collisions),
I40E_NETDEV_STAT(rx_length_errors),
I40E_NETDEV_STAT(rx_crc_errors),
};
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+ I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+ I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+ I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+ I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+ I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+ I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+};
+
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct ethtool_rxnfc *cmd);
@@ -78,7 +104,12 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -88,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
I40E_PF_STAT("tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -112,8 +144,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
- I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+
/* LPI stats */
I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -122,11 +156,14 @@ static struct i40e_stats i40e_gstrings_stats[] = {
};
#define I40E_QUEUE_STATS_LEN(n) \
- ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
- ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+ (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+ * 2 /* Tx and Rx together */ \
+ * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -135,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
/ sizeof(u64))
+#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
@@ -620,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi])
- return I40E_PF_STATS_LEN(netdev);
- else
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ int len = I40E_PF_STATS_LEN(netdev);
+
+ if (pf->lan_veb != I40E_NO_VEB)
+ len += I40E_VEB_STATS_LEN;
+ return len;
+ } else {
return I40E_VSI_STATS_LEN(netdev);
+ }
default:
return -EOPNOTSUPP;
}
@@ -633,6 +676,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
int i = 0;
@@ -648,10 +692,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
+ p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
- struct i40e_ring *rx_ring;
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
@@ -662,33 +710,45 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ i += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i + 2] = rx_ring->stats.packets;
- data[i + 3] = rx_ring->stats.bytes;
+ data[i] = rx_ring->stats.packets;
+ data[i + 1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ i += 2;
}
rcu_read_unlock();
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_tx[j];
- data[i++] = pf->stats.priority_xoff_tx[j];
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_rx[j];
- data[i++] = pf->stats.priority_xoff_rx[j];
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+ p = (char *)veb;
+ p += i40e_gstrings_veb_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
- data[i++] = pf->stats.priority_xon_2_xoff[j];
}
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
}
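/* The ring counters read above are published under the kernel's u64_stats
 * seqcount so a 32-bit reader never observes a torn 64-bit value.  A
 * minimal reader-side sketch, assuming a ring whose struct u64_stats_sync
 * member is named syncp:
 */
unsigned int start;
u64 packets, bytes;

do {
	start = u64_stats_fetch_begin_irq(&ring->syncp);
	packets = ring->stats.packets;
	bytes   = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));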
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -713,6 +773,11 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_net_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_misc_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -723,34 +788,42 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
- i40e_gstrings_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "veb.%s",
+ i40e_gstrings_veb_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
@@ -1007,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
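/* rx/tx_itr_setting now packs the adaptive flag and the interval into a
 * single field, which is what get/set_coalesce encode and decode above.
 * A minimal sketch, assuming the 0x8000 flag bit that the driver headers
 * of this era use for I40E_ITR_DYNAMIC:
 */
#define ITR_DYNAMIC_FLAG 0x8000			/* mirrors I40E_ITR_DYNAMIC */

static inline bool itr_is_adaptive(u16 setting)
{
	return setting & ITR_DYNAMIC_FLAG;	/* adaptive (dynamic) mode? */
}

static inline u16 itr_interval_usecs(u16 setting)
{
	return setting & ~ITR_DYNAMIC_FLAG;	/* interval in usecs */
}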
@@ -1033,37 +1105,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
- break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
- break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- break;
- default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
vector = vsi->base_vector;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
@@ -1140,8 +1202,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
int cnt = 0;
/* report total rule count */
- cmd->data = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
+ cmd->data = i40e_get_fd_cnt_all(pf);
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
@@ -1175,10 +1236,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
- /* report total rule count */
- cmd->data = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
if (fsp->location <= rule->fd_id)
@@ -1189,11 +1246,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
return -EINVAL;
fsp->flow_type = rule->flow_type;
- fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
- fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
- fsp->ring_cookie = rule->q_index;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
+ /* Reverse the src and dest notion, since the HW views them from the
+ * Tx perspective, whereas the user expects them from the Rx filter view.
+ */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+
+ if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
return 0;
}
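/* The mirror-image swap appears again on the add path further below; with
 * the field names from struct ethtool_rx_flow_spec and the driver's rule:
 *
 *   user (Rx filter view)      hardware (Tx view)
 *   h_u.tcp_ip4_spec.psrc   -> rule->dst_port
 *   h_u.tcp_ip4_spec.pdst   -> rule->src_port
 *   h_u.tcp_ip4_spec.ip4src -> rule->dst_ip[0]
 *   h_u.tcp_ip4_spec.ip4dst -> rule->src_ip[0]
 *
 * i.e. the user's Rx-view destination becomes the hardware's Tx-view
 * source, and vice versa, in both directions of the translation.
 */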
@@ -1223,6 +1293,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
@@ -1291,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -1309,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -1503,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -EINVAL;
}
- if (fsp->ring_cookie >= vsi->num_queue_pairs)
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= vsi->num_queue_pairs))
return -EINVAL;
input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1524,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = 0;
+ input->cnt_index = pf->fd_sb_cnt_idx;
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
- input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
- input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ /* Reverse the src and dest notion, since the HW expects them to be from
+ * the Tx perspective, whereas the user input is from the Rx filter view.
+ */
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
ret = i40e_add_del_fdir(vsi, input, true);
if (ret)
@@ -1692,5 +1761,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+ netdev->ethtool_ops = &i40e_ethtool_ops;
}
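/* SET_ETHTOOL_OPS was only ever a thin wrapper around this assignment and
 * was removed tree-wide during the v3.16 cycle, hence the open-coded form:
 *
 *   netdev->ethtool_ops = &i40e_ethtool_ops;
 */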
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index bf2d4cc5b569..9b987ccc9e82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -201,7 +201,7 @@ exit:
**/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+ u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- if (is_pf)
- I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
- else
- I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 0cd4701234f8..b45d8fedc5e7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..870ab1ee072c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* remove the backing pages from pd_idx1 to i */
while (i && (i > pd_idx1)) {
i40e_remove_pd_bp(hw, info->hmc_info,
- (i - 1), true);
+ (i - 1));
i--;
}
}
@@ -433,11 +433,7 @@ exit_sd_error:
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++) {
- i40e_remove_pd_bp(
- hw,
- info->hmc_info,
- i,
- true);
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
}
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
pd_table =
&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry[rel_pd_idx].valid) {
- ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
- j, true);
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
if (ret_code)
goto exit;
}
@@ -747,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
{ 0 }
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e72449f1265..275ca9a1719e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -356,6 +354,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
@@ -368,7 +367,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring, *rx_ring;
u64 bytes, packets;
unsigned int start;
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
}
rcu_read_unlock();
- /* following stats updated by ixgbe_watchdog_task() */
+ /* following stats updated by i40e_watchdog_subtask() */
stats->multicast = vsi_stats->multicast;
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
@@ -530,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_discards, &es->rx_discards);
+ i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
I40E_GLV_GORCL(stat_idx),
@@ -648,10 +652,10 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
return;
/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -702,10 +706,10 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -720,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/**
- * i40e_update_stats - Update the board statistics counters.
+ * i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated
*
* There are a few instances where we store the same stat in a
* couple of different structs. This is partly because we have
* the netdev stats that need to be filled out, which is slightly
* different from the "eth_stats" defined by the chip and used in
- * VF communications. We sort it all out here in a central place.
+ * VF communications. We sort it out here.
**/
-void i40e_update_stats(struct i40e_vsi *vsi)
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
@@ -741,8 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
- u32 val;
- int i;
u16 q;
if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -804,196 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
ns->tx_packets = tx_p;
ns->tx_bytes = tx_b;
- i40e_update_eth_stats(vsi);
/* update netdev stats from eth stats */
- ons->rx_errors = oes->rx_errors;
- ns->rx_errors = es->rx_errors;
+ i40e_update_eth_stats(vsi);
ons->tx_errors = oes->tx_errors;
ns->tx_errors = es->tx_errors;
ons->multicast = oes->rx_multicast;
ns->multicast = es->rx_multicast;
+ ons->rx_dropped = oes->rx_discards;
+ ns->rx_dropped = es->rx_discards;
ons->tx_dropped = oes->tx_discards;
ns->tx_dropped = es->tx_discards;
- /* Get the port data only if this is the main PF VSI */
+ /* pull in a couple PF stats if this is the main vsi */
if (vsi == pf->vsi[pf->lan_vsi]) {
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ ns->rx_crc_errors = pf->stats.crc_errors;
+ ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+ ns->rx_length_errors = pf->stats.rx_length_errors;
+ }
+}
- i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_discards,
- &nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
- i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_multicast,
- &nsd->eth.rx_multicast);
+/**
+ * i40e_update_pf_stats - Update the pf statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+ int i;
- i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_dropped_link_down,
- &nsd->tx_dropped_link_down);
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
- pf->stat_offsets_loaded,
- &osd->crc_errors, &nsd->crc_errors);
- ns->rx_crc_errors = nsd->crc_errors;
+ i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
- i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
- pf->stat_offsets_loaded,
- &osd->illegal_bytes, &nsd->illegal_bytes);
- ns->rx_errors = nsd->crc_errors
- + nsd->illegal_bytes;
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
- i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_local_faults,
- &nsd->mac_local_faults);
- i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_remote_faults,
- &nsd->mac_remote_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
- i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_length_errors,
- &nsd->rx_length_errors);
- ns->rx_length_errors = nsd->rx_length_errors;
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_rx, &nsd->link_xon_rx);
- i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_tx, &nsd->link_xon_tx);
- i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
- i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
- for (i = 0; i < 8; i++) {
- i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_rx[i],
- &nsd->priority_xon_rx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_tx[i],
- &nsd->priority_xon_tx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xoff_tx[i],
- &nsd->priority_xoff_tx[i]);
- i40e_stat_update32(hw,
- I40E_GLPRT_RXON2OFFCNT(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_2_xoff[i],
- &nsd->priority_xon_2_xoff[i]);
- }
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
- i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_64, &nsd->rx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_127, &nsd->rx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_255, &nsd->rx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_511, &nsd->rx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1023, &nsd->rx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1522, &nsd->rx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_big, &nsd->rx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
- i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_64, &nsd->tx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_127, &nsd->tx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_255, &nsd->tx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_511, &nsd->tx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1023, &nsd->tx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1522, &nsd->tx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_big, &nsd->tx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
- i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_undersize, &nsd->rx_undersize);
- i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_fragments, &nsd->rx_fragments);
- i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
- i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_jabber, &nsd->rx_jabber);
-
- val = rd32(hw, I40E_PRTPM_EEE_STAT);
- nsd->tx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
- nsd->rx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
- i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
pf->stat_offsets_loaded,
- &osd->tx_lpi_count, &nsd->tx_lpi_count);
- i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
- pf->stat_offsets_loaded,
- &osd->rx_lpi_count, &nsd->rx_lpi_count);
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
}
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+
+ /* FDIR stats */
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_match, &nsd->fd_atr_match);
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_sb_match, &nsd->fd_sb_match);
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
pf->stat_offsets_loaded = true;
}
/**
+ * i40e_update_stats - Update the various statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * Update the various stats for this VSI and its related entities.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_update_pf_stats(pf);
+
+ i40e_update_vsi_stats(vsi);
+}
+
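/* Sketch of the resulting call flow after the split (the watchdog loop
 * appears later in this file): only the main LAN VSI pays for the
 * port-wide register reads, every other VSI does just the per-VSI work.
 *
 *   i40e_watchdog_subtask()
 *     i40e_update_stats(vsi)        for each VSI with a netdev
 *       i40e_update_pf_stats(pf)    only when vsi == pf->vsi[pf->lan_vsi]
 *       i40e_update_vsi_stats(vsi)  always
 */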
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1101,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ struct i40e_pf *pf = vsi->back;
+ i40e_status aq_ret;
+
+ /* Only appropriate for the PF main VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (aq_ret)
+ dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+}
+
+/**
* i40e_add_filter - Add a mac/vlan filter to the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1125,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!f)
goto add_filter_out;
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
f->changed = true;
@@ -1249,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
}
- memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
}
/* In order to be sure to not drop any packets, add the new address
@@ -1263,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
i40e_sync_vsi_filters(vsi);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
}
@@ -1313,7 +1398,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = vsi->alloc_queue_pairs/numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -1520,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to delete list */
- memcpy(del_list[num_del].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1542,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status !=
+ I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
aq_ret,
@@ -1554,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list, num_del, NULL);
num_del = 0;
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
@@ -1583,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to add array */
- memcpy(add_list[num_add].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
add_list[num_add].vlan_tag =
cpu_to_le16(
(u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1681,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
pf->flags &= ~I40E_FLAG_FILTER_SYNC;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1698,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_vsi *vsi = np->vsi;
/* MTU < 68 is an error and causes problems on some kernels */
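/* Worked example of the corrected frame math, with new_mtu = 1500:
 *
 *   1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes
 *
 * i.e. a standard MTU now accounts for a single VLAN tag, matching the
 * rx_size_1522 bucket counted in the port statistics.
 */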
@@ -2312,6 +2398,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
+ /* set the prefena field to 1 because the manual says to */
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2413,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
**/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
u16 qoffset, qcount;
int i, n;
@@ -2426,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = vsi->rx_rings[i];
- struct i40e_ring *tx_ring = vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2565,7 +2654,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
- I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2733,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
&q_vector->affinity_mask);
}
+ vsi->irqs_ready = true;
return 0;
free_queue_irqs:
@@ -3152,6 +3241,12 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ udelay(10);
+
for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
@@ -3160,9 +3255,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
/* Skip if the queue is already in the requested state */
- if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
- if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
/* turn on/off the queue */
@@ -3178,13 +3271,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (enable) {
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3223,15 +3311,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
- if (enable) {
- /* is STAT set ? */
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- } else {
- /* is !STAT set ? */
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
if (enable)
@@ -3244,13 +3326,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
for (j = 0; j < 10; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) {
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
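/* Both the Tx and Rx paths above collapse their four if/else arms into a
 * single test by normalizing the status bit with !! before comparing it
 * to the bool.  A minimal sketch with illustrative names:
 */
static bool queue_state_matches(u32 reg, u32 stat_mask, bool enable)
{
	/* !! folds any set bit down to 1 so it compares cleanly to a bool */
	return enable == !!(reg & stat_mask);
}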
@@ -3304,6 +3381,10 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
if (!vsi->q_vectors)
return;
+ if (!vsi->irqs_ready)
+ return;
+
+ vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
@@ -3476,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
i40e_vsi_free_q_vectors(pf->vsi[i]);
i40e_reset_interrupt_capability(pf);
@@ -3513,6 +3594,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+}
+
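/* Both former call sites (i40e_quiesce_vsi and i40e_close, further below)
 * now funnel through this helper; test_and_set_bit() skips the redundant
 * i40e_down() on a second call, and the irqs_ready flag introduced
 * elsewhere in this patch keeps i40e_vsi_free_irq() from freeing twice.
 */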
+/**
* i40e_quiesce_vsi - Pause a given VSI
* @vsi: the VSI being paused
**/
@@ -3525,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
- set_bit(__I40E_DOWN, &vsi->state);
- i40e_down(vsi);
+ i40e_vsi_close(vsi);
}
}
@@ -3543,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
- i40e_up(vsi); /* this clears the DOWN bit */
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
/**
@@ -3554,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_quiesce_vsi(pf->vsi[v]);
}
@@ -3568,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_unquiesce_vsi(pf->vsi[v]);
}
@@ -4009,7 +4102,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
}
/* Update each VSI */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v])
continue;
@@ -4028,6 +4121,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
pf->vsi[v]->seid);
/* Will try to configure as many components as possible */
} else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
@@ -4065,14 +4160,69 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* When status is not DISABLED then DCBX in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
}
+ } else {
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ pf->hw.aq.asq_last_status);
}
out:
return err;
}
#endif /* CONFIG_I40E_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ * @isup: true if the link is up, false otherwise
+ */
+static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+ char speed[SPEED_SIZE] = "Unknown";
+ char fc[FC_SIZE] = "RX/TX";
+
+ if (!isup) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (vsi->back->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ strncpy(speed, "40 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ strncpy(speed, "10 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ strncpy(speed, "1000 Mbps", SPEED_SIZE);
+ break;
+ default:
+ break;
+ }
+
+ switch (vsi->back->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ strncpy(fc, "RX/TX", FC_SIZE);
+ break;
+ case I40E_FC_TX_PAUSE:
+ strncpy(fc, "TX", FC_SIZE);
+ break;
+ case I40E_FC_RX_PAUSE:
+ strncpy(fc, "RX", FC_SIZE);
+ break;
+ default:
+ strncpy(fc, "None", FC_SIZE);
+ break;
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
/**
* i40e_up_complete - Finish the last steps of bringing up a connection
@@ -4099,11 +4249,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
- netdev_info(vsi->netdev, "NIC Link is Up\n");
+ i40e_print_link_message(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
} else if (vsi->netdev) {
- netdev_info(vsi->netdev, "NIC Link is Down\n");
+ i40e_print_link_message(vsi, false);
}
/* replay FDIR SB filters */
@@ -4309,24 +4459,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
if (err)
goto err_setup_rx;
- if (!vsi->netdev) {
- err = EINVAL;
- goto err_setup_rx;
- }
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
+ err = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ } else {
+ err = -EINVAL;
+ goto err_setup_rx;
+ }
err = i40e_up_complete(vsi);
if (err)
@@ -4383,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state))
- return 0;
-
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
return 0;
}
@@ -4410,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
+
/* do the biggest reset indicated */
if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
@@ -4475,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* Find the VSI(s) that requested a re-init */
dev_info(&pf->pdev->dev,
"VSI reinit requested\n");
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4565,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
int ret = 0;
u8 type;
+ /* Not DCB capable or capability disabled */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return ret;
+
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4606,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (!need_reconfig)
goto exit;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
/* Reconfiguration needed, quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
@@ -4709,8 +4873,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -4803,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
i40e_veb_link_event(pf->veb[i], link_up);
/* ... now the local VSIs */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
i40e_vsi_link_event(pf->vsi[i], link_up);
}
@@ -4821,10 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
-
if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4862,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
* for each q_vector
* force an interrupt
*/
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
int armed = 0;
@@ -4912,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
@@ -5018,11 +5179,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u16 pending, i = 0;
i40e_status ret;
u16 opcode;
+ u32 oldval;
u32 val;
if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
return;
+ /* check for error indications */
+ val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ oldval = val;
+ if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+ val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ oldval = val;
+ if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
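/* The same read-check-clear shape is applied to both queues above; a
 * minimal sketch of the pattern, with register and mask names
 * illustrative:
 */
u32 val = rd32(hw, reg_len);
u32 oldval = val;

if (val & ERR_BIT_MASK) {
	dev_info(dev, "error detected\n");
	val &= ~ERR_BIT_MASK;		/* clear the latched flag */
}
if (oldval != val)
	wr32(hw, reg_len, val);		/* write back only when changed */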
event.msg_size = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf)
@@ -5128,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
int ret;
/* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
if (pf->vsi[v] &&
pf->vsi[v]->veb_idx == veb->idx &&
pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5158,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
goto end_reconstitute;
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
continue;
@@ -5226,9 +5423,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- /* increment MSI-X count because current FW skips one */
- pf->hw.func_caps.num_msix_vectors++;
-
if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
(pf->hw.aq.fw_maj_ver < 2)) {
pf->hw.func_caps.num_msix_vectors++;
@@ -5267,15 +5461,14 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- bool new_vsi = false;
- int err, i;
+ int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
/* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
break;
@@ -5288,47 +5481,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- goto err_vsi;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ return;
}
- new_vsi = true;
- }
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
-
- err = i40e_vsi_setup_tx_resources(vsi);
- if (err)
- goto err_setup_tx;
- err = i40e_vsi_setup_rx_resources(vsi);
- if (err)
- goto err_setup_rx;
-
- if (new_vsi) {
- char int_name[IFNAMSIZ + 9];
- err = i40e_vsi_configure(vsi);
- if (err)
- goto err_setup_rx;
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
- err = i40e_up_complete(vsi);
- if (err)
- goto err_up_complete;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
}
- return;
-
-err_up_complete:
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-err_setup_rx:
- i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
- i40e_vsi_free_tx_resources(vsi);
-err_vsi:
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- i40e_vsi_clear(vsi);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
@@ -5340,7 +5498,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
int i;
i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
break;
@@ -5357,7 +5515,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
static int i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ i40e_status ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
@@ -5366,13 +5524,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- if (i40e_check_asq_alive(hw))
- i40e_vc_notify_reset(pf);
-
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
pf->vsi[v]->seid = 0;
}
@@ -5380,22 +5535,40 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
/* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ if (hw->hmc.hmc_obj) {
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_warn(&pf->pdev->dev,
+ "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
}
return ret;
}
/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
* i40e_reset_and_rebuild - reset and rebuild using a saved config
* @pf: board private structure
* @reinit: if the Main VSI needs to re-initialized.
**/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
@@ -5405,8 +5578,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
* because the reset will make them disappear.
*/
ret = i40e_pf_reset(hw);
- if (ret)
+ if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ goto end_core_reset;
+ }
pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state))
@@ -5426,6 +5601,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_verify_eeprom(pf);
}
+ i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
if (ret) {
dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5526,13 +5702,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
-
- dev_info(&pf->pdev->dev, "reset complete\n");
+ i40e_send_version(pf);
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5642,7 +5812,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
- const int vxlan_hdr_qwords = 4;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u8 filter_index;
@@ -5660,7 +5829,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
port = pf->vxlan_ports[i];
ret = port ?
i40e_aq_add_udp_tunnel(hw, ntohs(port),
- vxlan_hdr_qwords,
I40E_AQC_TUNNEL_TYPE_VXLAN,
&filter_index, NULL)
: i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5839,15 +6007,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
* find next empty vsi slot, looping back around if necessary
*/
i = pf->next_vsi;
- while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ while (i < pf->num_alloc_vsi && pf->vsi[i])
i++;
- if (i >= pf->hw.func_caps.num_vsis) {
+ if (i >= pf->num_alloc_vsi) {
i = 0;
while (i < pf->next_vsi && pf->vsi[i])
i++;
}
- if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
@@ -5870,6 +6038,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
+ vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
@@ -5987,14 +6156,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_pf *pf = vsi->back;
int i;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *tx_ring;
- struct i40e_ring *rx_ring;
-
/* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
@@ -6052,8 +6219,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
vectors = 0;
}
- pf->num_msix_entries = vectors;
-
return vectors;
}
@@ -6107,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
vec = i40e_reserve_msix_vectors(pf, v_budget);
+
+ if (vec != v_budget) {
+ /* If we have limited resources, start with no vectors for the
+ * special features, hand vectors to some of them according to
+ * the policy below, and at the end disable any feature that
+ * received none.
+ */
+ pf->num_vmdq_msix = 0;
+ }
+
if (vec < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -6115,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
- pf->num_vmdq_msix = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
} else if (vec != v_budget) {
+ /* reserve the misc vector */
+ vec--;
+
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
- vec--; /* reserve the misc vector */
+ pf->num_vmdq_vsis = 1;
/* partition out the remaining vectors */
switch (vec) {
case 2:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 1;
break;
case 3:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 2;
break;
default:
@@ -6147,6 +6320,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ (pf->num_vmdq_msix == 0)) {
+ dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ }
return err;
}
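A condensed sketch of the scale-down policy above (hypothetical helper; the real logic lives inline in i40e_init_msix): one vector is peeled off for the misc causes, VMDq is pinned to a single vector and a single VSI, and what remains feeds the LAN queues.

static void i40e_scale_vectors_down(struct i40e_pf *pf, int vec)
{
	vec--;				/* reserve the misc vector */
	pf->num_vmdq_msix = 1;		/* force VMDqs to only one vector */
	pf->num_vmdq_vsis = 1;
	/* remaining vectors go to LAN: vec == 2 -> 1, vec == 3 -> 2;
	 * the real default case caps this further
	 */
	pf->num_lan_msix = vec - 1;
}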
@@ -6171,7 +6349,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, vsi->work_limit);
+ i40e_napi_poll, NAPI_POLL_WEIGHT);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -6231,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (err) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -6364,7 +6542,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
return 0;
queue_count = min_t(int, queue_count, pf->rss_size_max);
- queue_count = rounddown_pow_of_two(queue_count);
if (queue_count != pf->rss_size) {
i40e_prep_for_reset(pf);
@@ -6407,6 +6584,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Set default ITR */
+ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
@@ -6416,7 +6597,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
- pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
@@ -6432,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ /* Setup a counter for fd_atr per pf */
+ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* Setup a counter for fd_sb per pf */
+ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6649,6 +6833,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
}
#endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses, so if an
+ * ndm_state is given, only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
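In practice this hook is driven from userspace via iproute2, e.g. something like "bridge fdb add <MAC> dev <pfdev>"; because the hardware cannot age entries, anything other than a static (NUD_PERMANENT) entry is rejected above.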
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -6669,13 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
- .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
+ .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = i40e_ndo_fdb_del,
+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
+#endif
+#endif
};
/**
@@ -6720,16 +7002,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
- NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ netdev->features |= NETIF_F_NTUPLE;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
- memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ ether_addr_copy(mac_addr, hw->mac.perm_addr);
+ /* The following two steps are necessary to prevent reception
+ * of tagged packets: by default the NVM loads a MAC-VLAN
+ * filter that will accept any tagged packet, and these calls
+ * replace it during normal operation until a specific VLAN
+ * tag filter has been set.
+ */
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6739,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
}
i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
/* vlan gets same features (except vlan offload)
* after any tweaks for specific VSI types
*/
@@ -6772,7 +7064,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
return;
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
- return;
}
/**
@@ -6898,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.info.sec_flags |=
+ (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -6982,11 +7280,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
unregister_netdev(vsi->netdev);
}
} else {
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
}
i40e_vsi_disable_irq(vsi);
}
@@ -7013,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
* the orphan VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] &&
pf->vsi[i]->uplink_seid == uplink_seid &&
(pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7192,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (!veb && uplink_seid != pf->mac_seid) {
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
vsi = pf->vsi[i];
break;
@@ -7435,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7487,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
n++;
vsi = pf->vsi[i];
@@ -7516,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
i40e_veb_clear(veb);
-
- return;
}
/**
@@ -7601,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
break;
- if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
vsi_seid);
return NULL;
@@ -7639,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
if (ret)
goto err_veb;
+ if (vsi_idx == pf->lan_vsi)
+ pf->lan_veb = veb->idx;
return veb;
@@ -7774,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
"header: %d reported %d total\n",
num_reported, num_total);
- if (num_reported) {
- int sz = sizeof(*sw_config) * num_reported;
-
- kfree(pf->sw_config);
- pf->sw_config = kzalloc(sz, GFP_KERNEL);
- if (pf->sw_config)
- memcpy(pf->sw_config, sw_config, sz);
- }
-
for (i = 0; i < num_reported; i++) {
struct i40e_aqc_switch_config_element_resp *ele =
&sw_config->element[i];
@@ -7949,9 +8234,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_DCB_ENABLED))) {
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
@@ -7960,14 +8243,27 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
+ } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE))) {
+ /* one qp for PF */
+ pf->rss_size = pf->num_lan_qps = 1;
+ queues_left -= pf->num_lan_qps;
+
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
} else {
/* Not enough queues for all TCs */
- if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = pf->rss_size_max;
@@ -7998,7 +8294,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
}
pf->queues_left = queues_left;
- return;
}
/**
@@ -8055,12 +8350,13 @@ static void i40e_print_features(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
- buf += sprintf(buf, "FDir ");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
- buf += sprintf(buf, "ATR ");
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+ buf += sprintf(buf, "FD_ATR ");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ buf += sprintf(buf, "FD_SB ");
buf += sprintf(buf, "NTUPLE ");
- if (pf->flags & I40E_FLAG_DCB_ENABLED)
+ }
+ if (pf->flags & I40E_FLAG_DCB_CAPABLE)
buf += sprintf(buf, "DCB ");
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
@@ -8083,13 +8379,13 @@ static void i40e_print_features(struct i40e_pf *pf)
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 link_status;
int err = 0;
u32 len;
+ u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -8201,6 +8497,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_verify_eeprom(pf);
+ /* Rev 0 hardware was never productized */
+ if (hw->revision_id < 1)
+ dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
@@ -8234,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mac_addr;
}
dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
- memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
@@ -8242,8 +8542,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- goto err_init_dcb;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -8264,10 +8564,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
- /* Set up the *vsi struct based on the number of VSIs in the HW,
- * and set up our local tracking of the MAIN PF vsi.
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
*/
- len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+ len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
pf->vsi = kzalloc(len, GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
@@ -8279,6 +8587,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -8300,6 +8615,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+#ifdef CONFIG_PCI_IOV
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8322,17 +8638,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err);
}
}
+#endif /* CONFIG_PCI_IOV */
pfs_found++;
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+ i40e_send_version(pf);
/* since everything's happy, start the service_task timer */
mod_timer(&pf->service_timer,
@@ -8373,9 +8686,6 @@ err_vsis:
err_switch_setup:
i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -8456,10 +8766,13 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown and destroy the HMC */
- ret_code = i40e_shutdown_lan_hmc(&pf->hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the HMC resources: %d\n", ret_code);
+ if (pf->hw.hmc.hmc_obj) {
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n",
+ ret_code);
+ }
/* shutdown the adminq */
ret_code = i40e_shutdown_adminq(&pf->hw);
@@ -8470,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
@@ -8485,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
kfree(pf->qp_pile);
kfree(pf->irq_pile);
- kfree(pf->sw_config);
kfree(pf->vsi);
/* force a PF reset to clean anything leftover */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..a430699c41d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,10 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +169,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
@@ -216,6 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..101f439acda6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48,7 +48,6 @@
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PTP_TX_TIMEOUT (HZ * 15)
/**
* i40e_ptp_read - Read the PHC time from the device
@@ -217,40 +216,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
- struct i40e_pf *pf = container_of(work, struct i40e_pf,
- ptp_tx_work);
- struct i40e_hw *hw = &pf->hw;
- u32 prttsyn_stat_0;
-
- if (!pf->ptp_tx_skb)
- return;
-
- if (time_is_before_jiffies(pf->ptp_tx_start +
- I40E_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
- pf->ptp_tx_skb = NULL;
- pf->tx_hwtstamp_timeouts++;
- dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
- return;
- }
-
- prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
- i40e_ptp_tx_hwtstamp(pf);
- else
- schedule_work(&pf->ptp_tx_work);
-}
-
-/**
* i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
* @ptp: The PTP clock structure
* @rq: The requested feature to change
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
u32 regval;
spin_lock_init(&pf->tmreg_lock);
- INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_tx = false;
pf->ptp_rx = false;
- cancel_work_sync(&pf->ptp_tx_work);
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1d40f425acf1..947de98500f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1340,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1367,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1589,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..e49f31dbd5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
*
******************************************************************************/
+#include <linux/prefetch.h>
#include "i40e.h"
#include "i40e_prototype.h"
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* find existing FDIR VSI */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
dcc |= ((u32)fdir_data->cnt_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
- int i;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
ip->saddr = fd_data->src_ip[0];
udp->source = fd_data->src_port;
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
}
return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
}
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
/* Turn off ATR first */
- if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
- } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) {
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
return;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
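For the VXLAN case above, the hardware leaves the outer UDPv4 checksum to software. A freestanding sketch of the same pseudo-header arithmetic using the standard helpers; it folds the full datagram against the pseudo-header and expects zero, a slight variation on the driver's compare-against-stored-check form.

/* Sketch only: assumes skb->transport_header already points at the
 * outer UDP header (as set up above) and ignores the all-zero
 * "no checksum" case.
 */
static bool i40e_outer_udp4_csum_ok(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	unsigned int len = skb->len - skb_transport_offset(skb);
	__wsum sum;

	/* sum the UDP header (check field included) plus payload */
	sum = skb_checksum(skb, skb_transport_offset(skb), len, 0);

	/* fold in the IPv4 pseudo-header; a valid checksum yields 0 */
	return csum_tcpudp_magic(iph->saddr, iph->daddr, len,
				 IPPROTO_UDP, sum) == 0;
}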
/**
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |=
+ ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT;
- pf->ptp_tx_start = jiffies;
- schedule_work(&pf->ptp_tx_work);
-
return 1;
}
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d5349698e513..0277894fe1c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,7 +27,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -122,11 +117,11 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
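Worked example of the new descriptor math: with I40E_MAX_DATA_PER_TXD capped at 8192, TXD_USE_COUNT is a plain DIV_ROUND_UP, and a page-sized (4 KB) fragment always fits in one descriptor, which is why the DESC_NEEDED wake threshold shrinks to one descriptor per fragment plus a small fixed reserve of four. Illustrative compile-time checks (C11, not in the driver):

static_assert(DIV_ROUND_UP(9000, 8192) == 2, "9000B fragment -> 2 descriptors");
static_assert(DIV_ROUND_UP(65536, 8192) == 8, "64KB fragment -> 8 descriptors");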
@@ -184,7 +179,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -200,12 +194,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..9d39ff23c5fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,12 +36,10 @@
/* Device IDs */
#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_A 0x157F
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
@@ -60,8 +58,8 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
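Worked example for the corrected macro: the 18000 ms I40E_MAX_NVM_TIMEOUT above becomes 18,000,000 GTIME ticks of 1 usec each; the old /2 variant encoded the same duration as 9,000,000 ticks under the (incorrect) 2 usec resolution.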
/* forward declaration */
struct i40e_hw;
@@ -167,6 +165,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -409,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -488,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -507,9 +506,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
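Deriving the mask from the enum keeps it in lockstep with new status bits: with UDP_0 at bit 18, I40E_RX_DESC_STATUS_LAST evaluates to 19 and the mask becomes (1 << 19) - 1 = 0x7FFFF, where the old hard-coded 0x7FFFUL stopped at bit 14 and missed IPV6EXADD, the reserved pair, and UDP_0. Illustrative checks (C11, not in the driver):

static_assert(I40E_RX_DESC_STATUS_LAST == 19, "UDP_0 is bit 18");
static_assert(((1 << 19) - 1) == 0x7FFFF, "mask now spans bits 0..18");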
@@ -537,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -658,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -955,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -962,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1015,9 +1023,12 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
/* EEE LPI */
- bool tx_lpi_status;
- bool rx_lpi_status;
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
u64 tx_lpi_count; /* etlpic */
u64 rx_lpi_count; /* erlpic */
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 22a1b69cd646..70951d2edcad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..f5b9d2062573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
/***********************misc routines*****************************/
/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+}
+
+/**
* i40e_vc_isvalid_vsi_id
* @vf: pointer to the vf info
* @vsi_id: vf relative vsi id
@@ -230,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
- tx_ctx.head_wb_ena = 1;
- tx_ctx.head_wb_addr = info->dma_ring_addr +
- (info->ring_len * sizeof(struct i40e_tx_desc));
+ tx_ctx.head_wb_ena = info->headwb_enabled;
+ tx_ctx.head_wb_addr = info->dma_headwb_addr;
/* clear the context in the HMC */
ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -336,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
rx_ctx.tphhead_ena = 1;
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ vf->tx_rate / 50, 0, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
+ }
+
error_alloc_vsi_res:
return ret;
}
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -867,6 +898,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
ret = -ENOMEM;
goto err_alloc;
}
+ pf->vf = vfs;
/* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) {
@@ -876,13 +908,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
/* vf resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
- pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
i40e_enable_pf_switch_lb(pf);
@@ -951,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
return i40e_pci_sriov_enable(pdev, num_vfs);
- i40e_free_vfs(pf);
+ if (!i40e_vfs_are_assigned(pf)) {
+ i40e_free_vfs(pf);
+ } else {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -2022,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ true, false);
- /* add the new mac address */
- f = i40e_add_filter(vsi, mac, 0, true, false);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add VF ucast filter\n");
- ret = -ENOMEM;
- goto error_param;
- }
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2040,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EIO;
goto error_param;
}
- memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ ether_addr_copy(vf->default_lan_addr.addr, mac);
vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2088,18 +2123,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
+ /* Administrator error - knock the VF offline until the
+ * administrator reconfigures the network correctly and then
+ * reloads the VF driver.
+ */
+ i40e_vc_disable_vf(pf, vf);
+ }
/* Check for condition where there was already a port VLAN ID
* filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
* Before deleting all the old VLAN filters we must add new ones
* with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
* MAC addresses deleted.
*/
- if (!(vlan_id || qos) && vsi->info.pvid)
+ if ((!(vlan_id || qos) ||
+ (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
if (vsi->info.pvid) {
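Concretely: clearing an existing port VLAN (vlan_id and qos both 0) or replacing, say, pvid 100 with 200 both take the I40E_VLAN_ANY re-add path first; only a request matching the current pvid skips it, since the MAC filters would otherwise be deleted along with the old VLAN filters.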
@@ -2150,6 +2195,8 @@ error_pvid:
return ret;
}
+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
@@ -2158,9 +2205,76 @@ error_pvid:
*
* configure vf tx rate
**/
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int speed = 0;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (min_tx_rate) {
+ dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+ max_tx_rate, vf->vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((max_tx_rate < I40E_BW_CREDIT_DIVISOR) && (max_tx_rate > 0)) {
+ dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ }
+
+ /* Tx rate credits are in units of 50 Mbps; 0 disables the limit */
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
+ ret);
+ ret = -EIO;
+ goto error;
+ }
+ vf->tx_rate = max_tx_rate;
+error:
+ return ret;
}
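The conversion just above is plain integer division against I40E_BW_CREDIT_DIVISOR, so any rate that is not a multiple of 50 Mbps is silently rounded down to the next credit boundary. A minimal userspace sketch of that arithmetic, reusing only the divisor defined earlier in this patch:

	#include <stdio.h>

	#define I40E_BW_CREDIT_DIVISOR 50	/* 50 Mbps per BW credit */

	int main(void)
	{
		int rates[] = { 50, 100, 975, 10000, 40000 };
		unsigned int i;

		/* 975 Mbps yields 19 credits, i.e. an effective 950 Mbps cap */
		for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
			printf("%5d Mbps -> %3d credits\n",
			       rates[i], rates[i] / I40E_BW_CREDIT_DIVISOR);
		return 0;
	}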
/**
@@ -2200,10 +2314,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
I40E_VLAN_PRIORITY_SHIFT;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->spoofchk = vf->spoofchk;
ret = 0;
error_param:
@@ -2270,3 +2392,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
error_out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vf = &(pf->vf[vf_id]);
+
+ if (enable == vf->spoofchk)
+ goto out;
+
+ vf->spoofchk = enable;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (enable)
+ ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+ ret);
+ ret = -EIO;
+ }
+out:
+ return ret;
+}
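Note that the disable path above works without an explicit flag clear only because ctxt is zeroed first: the AQ update applies the whole security section, and a zeroed sec_flags turns MAC checking off. A small standalone model of that pattern (the flag value here is an assumption for illustration, not the real adminq define):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define SEC_FLAG_ENABLE_MAC_CHK 0x04	/* assumed value for illustration */

	struct fake_vsi_ctxt { uint8_t sec_flags; };

	static uint8_t build_sec_flags(bool enable)
	{
		struct fake_vsi_ctxt ctxt;

		memset(&ctxt, 0, sizeof(ctxt));	/* disable is the zeroed default */
		if (enable)
			ctxt.sec_flags |= SEC_FLAG_ENABLE_MAC_CHK;
		return ctxt.sec_flags;
	}

	int main(void)
	{
		printf("enable  -> sec_flags 0x%02x\n", (unsigned)build_sec_flags(true));
		printf("disable -> sec_flags 0x%02x\n", (unsigned)build_sec_flags(false));
		return 0;
	}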
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..63e7e0d81ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,8 +98,10 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
+ bool spoofchk;
};
void i40e_free_vfs(struct i40e_pf *pf);
@@ -115,10 +117,12 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index e09be37a07a8..3a423836a565 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..eb67cce3e8f9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -28,6 +31,16 @@
#include "i40e_prototype.h"
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
+ (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -276,8 +289,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -286,6 +302,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
@@ -294,7 +311,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
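The read-back check above compares only the low half of the 64-bit descriptor base address, on the assumption that a write that never landed will show up there. The split itself is the usual lower/upper 32-bit arithmetic, modeled standalone here with an example address:

	#include <stdio.h>
	#include <stdint.h>

	/* same arithmetic as the kernel's lower_32_bits()/upper_32_bits() */
	#define lower_32_bits(n) ((uint32_t)(n))
	#define upper_32_bits(n) ((uint32_t)((n) >> 32))

	int main(void)
	{
		uint64_t pa = 0x0000001234abc000ULL;	/* example DMA address */

		printf("ATQBAH = 0x%08x\n", upper_32_bits(pa));
		printf("ATQBAL = 0x%08x\n", lower_32_bits(pa));
		return 0;
	}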
/**
@@ -303,8 +327,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -313,6 +340,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
@@ -321,10 +349,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
@@ -372,7 +407,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -429,7 +466,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -659,6 +698,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -786,6 +831,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -880,6 +928,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
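The nvm_busy plumbing added to this file is a single-outstanding-command gate: the flag is raised when an NVM erase/update is posted to the send queue and dropped when the matching event is cleaned from the receive queue. A compact userspace model of that handshake (function names invented for illustration):

	#include <stdio.h>
	#include <stdbool.h>

	static bool nvm_busy;			/* models hw->aq.nvm_busy */

	static int send_nvm_op(const char *op)
	{
		if (nvm_busy) {
			printf("%s: rejected, NVM busy\n", op);
			return -1;		/* driver returns I40E_ERR_NVM */
		}
		nvm_busy = true;		/* set when the command is posted */
		printf("%s: posted\n", op);
		return 0;
	}

	static void complete_nvm_op(void)
	{
		nvm_busy = false;		/* cleared when the ARQ event arrives */
	}

	int main(void)
	{
		send_nvm_op("nvm_update");	/* posted */
		send_nvm_op("nvm_erase");	/* rejected while busy */
		complete_nvm_op();
		send_nvm_op("nvm_erase");	/* posted after completion */
		return 0;
	}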
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..e3472c62e155 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..e656ea7a7920 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -31,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
#define I40E_FW_API_VERSION_A0_MINOR 0x0000
struct i40e_aq_desc {
@@ -121,6 +124,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -180,9 +184,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +206,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -222,13 +224,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +274,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -339,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -678,7 +688,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1040,7 +1049,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1289,27 +1300,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1490,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1539,6 +1539,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1551,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1583,11 +1588,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1698,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1747,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1789,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -1948,19 +1999,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
+ u8 reserved0[3];
u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
-#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
-#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
- u8 variable_udp_length;
-#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
-#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
- u8 udp_key_index;
-#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
-#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
- u8 reserved[10];
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index d8654fb9e525..8e6a6dd9212b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index ae084378faab..a43155afdbe2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -40,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_SFP_X710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_KX_D:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
@@ -130,7 +131,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
}
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index cb97b3eed440..a2ad9a4e399d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -160,11 +163,6 @@ struct i40e_hmc_info {
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -223,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..d6f762241537 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 622f373b745d..21a91b14bf81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 97ab8c2b76f8..849edcc2e398 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 30af953cf106..369839655818 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -1337,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1364,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1586,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7c08cc2e339b..7fa7a41915c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b9f50f40abe1..48ebb6cd69f2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -725,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -739,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -778,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
@@ -953,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1508,9 +1547,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1519,12 +1556,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 10bf49e18d7f..30d248bc5d19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -24,7 +27,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -119,11 +117,11 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
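Halving I40E_MAX_DATA_PER_TXD to 8192 is what lets DESC_NEEDED collapse to MAX_SKB_FRAGS + 4: on the common 4K/8K page sizes a page-sized fragment now always fits in a single descriptor. The worst-case counting is just DIV_ROUND_UP, checked standalone:

	#include <stdio.h>

	#define I40E_MAX_DATA_PER_TXD 8192
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
	#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

	int main(void)
	{
		printf("4096 bytes -> %d desc\n", TXD_USE_COUNT(4096));	/* 1 */
		printf("8192 bytes -> %d desc\n", TXD_USE_COUNT(8192));	/* 1 */
		printf("9000 bytes -> %d desc\n", TXD_USE_COUNT(9000));	/* 2 */
		return 0;
	}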
@@ -180,7 +178,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -196,12 +193,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..d3cf5a69de54 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -32,13 +35,11 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_A 0x157F
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
@@ -57,8 +58,8 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
/* forward declaration */
struct i40e_hw;
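With GTIME now documented at 1 usec resolution, the macro becomes a plain ms-to-usec multiply; the 18000 ms NVM timeout above, for instance, maps to 18,000,000 ticks where the old 2 usec macro produced half that:

	#include <stdio.h>

	#define I40E_MS_TO_GTIME(time) ((time) * 1000)	/* 1 usec ticks */

	int main(void)
	{
		printf("%d ticks\n", I40E_MS_TO_GTIME(18000));	/* 18000000 */
		return 0;
	}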
@@ -101,15 +102,6 @@ enum i40e_debug_mask {
I40E_DEBUG_ALL = 0xFFFFFFFF
};
-/* PCI Bus Info */
-#define I40E_PCI_LINK_WIDTH_1 0x10
-#define I40E_PCI_LINK_WIDTH_2 0x20
-#define I40E_PCI_LINK_WIDTH_4 0x40
-#define I40E_PCI_LINK_WIDTH_8 0x80
-#define I40E_PCI_LINK_SPEED_2500 0x1
-#define I40E_PCI_LINK_SPEED_5000 0x2
-#define I40E_PCI_LINK_SPEED_8000 0x3
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -173,6 +165,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -415,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -494,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -513,9 +506,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
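Deriving the mask from the sentinel keeps it in lockstep with the enum: with UDP_0 at bit 18, I40E_RX_DESC_STATUS_LAST evaluates to 19 and the mask becomes (1 << 19) - 1 = 0x7FFFF, where the old hard-coded 0x7FFF covered only bits 0-14. A quick standalone check:

	#include <stdio.h>

	enum {
		I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
		I40E_RX_DESC_STATUS_LAST	/* sentinel == 19 */
	};

	int main(void)
	{
		unsigned long mask = (1UL << I40E_RX_DESC_STATUS_LAST) - 1;

		printf("status mask = 0x%lx\n", mask);	/* 0x7ffff */
		return 0;
	}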
@@ -543,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -664,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -961,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -968,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1021,9 +1023,12 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
/* EEE LPI */
- bool tx_lpi_status;
- bool rx_lpi_status;
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
u64 tx_lpi_count; /* etlpic */
u64 rx_lpi_count; /* erlpic */
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index ccf45d04b7ef..cd18d5689006 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -338,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 807807d62387..30ef519d4b91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -77,7 +80,7 @@ struct i40e_vsi {
#define I40EVF_MIN_TXD 64
#define I40EVF_MAX_RXD 4096
#define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
@@ -193,10 +196,12 @@ struct i40evf_adapter {
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
u32 tx_timeout_count;
struct list_head mac_filter_list;
+ u32 tx_desc_count;
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
u64 hw_csum_rx_error;
+ u32 rx_desc_count;
int num_msix_vectors;
struct msix_entry *msix_entries;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..60407a9df0c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -44,8 +47,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
I40EVF_STAT("rx_discards", current_stats.rx_discards),
- I40EVF_STAT("rx_errors", current_stats.rx_errors),
- I40EVF_STAT("rx_missed", current_stats.rx_missed),
I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
@@ -56,10 +57,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
};
#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN \
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
(((struct i40evf_adapter *) \
- netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
-#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+ netdev_priv(_dev))->vsi_res->num_queue_pairs \
+ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
@@ -75,7 +78,7 @@ static int i40evf_get_settings(struct net_device *netdev,
/* In the future the VF will be able to query the PF for
* some information - for now use a dummy value
*/
- ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->supported = 0;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = PORT_NONE;
@@ -94,9 +97,9 @@ static int i40evf_get_settings(struct net_device *netdev,
static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
- return I40EVF_STATS_LEN;
+ return I40EVF_STATS_LEN(netdev);
else
- return -ENOTSUPP;
+ return -EINVAL;
}
/**
@@ -219,13 +222,11 @@ static void i40evf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_ring *tx_ring = adapter->tx_rings[0];
- struct i40e_ring *rx_ring = adapter->rx_rings[0];
ring->rx_max_pending = I40EVF_MAX_RXD;
ring->tx_max_pending = I40EVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_desc_count;
+ ring->tx_pending = adapter->tx_desc_count;
}
/**
@@ -241,7 +242,6 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
- int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -257,17 +257,16 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->tx_rings[0]->count) &&
- (new_rx_count == adapter->rx_rings[0]->count))
+ if ((new_tx_count == adapter->tx_desc_count) &&
+ (new_rx_count == adapter->rx_desc_count))
return 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
- adapter->tx_rings[0]->count = new_tx_count;
- adapter->rx_rings[0]->count = new_rx_count;
- }
+ adapter->tx_desc_count = new_tx_count;
+ adapter->rx_desc_count = new_rx_count;
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
+
return 0;
}
@@ -290,14 +289,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
@@ -318,54 +316,361 @@ static int i40evf_set_coalesce(struct net_device *netdev,
struct i40e_q_vector *q_vector;
int i;
- if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
- vsi->work_limit = ec->tx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ else if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ else
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ else if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
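The accepted range in the checks above follows from the ITR register's 2 usec granularity: I40E_MIN_ITR (0x0004) and I40E_MAX_ITR (0x0FF0) are register counts, and shifting them left by one converts to microseconds, i.e. a usable window of 8..8160 usec. The bounds check in isolation:

	#include <stdio.h>
	#include <stdbool.h>

	#define I40E_MAX_ITR 0x0FF0	/* register uses 2 usec resolution */
	#define I40E_MIN_ITR 0x0004

	static bool itr_usecs_valid(unsigned int usecs)
	{
		return usecs >= (I40E_MIN_ITR << 1) &&
		       usecs <= (I40E_MAX_ITR << 1);
	}

	int main(void)
	{
		printf("8 usec    -> %d\n", itr_usecs_valid(8));	/* 1 */
		printf("8160 usec -> %d\n", itr_usecs_valid(8160));	/* 1 */
		printf("9000 usec -> %d\n", itr_usecs_valid(9000));	/* 0 */
		return 0;
	}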
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* We always hash on IP src and dest addresses */
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
- | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ case UDP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+
+ case TCP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
+ case UDP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
}
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
+ return 0;
+}
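
The hena word queried here is a plain 64-bit bitmap, one bit per packet
classifier type, assembled from two 32-bit register reads. A self-contained
illustration with a stand-in bit position (the real I40E_FILTER_PCTYPE_*
values live in the shared headers):

    #include <stdint.h>
    #include <stdio.h>

    #define PCTYPE_NONF_IPV4_TCP 31 /* assumed bit position, for illustration */

    int main(void)
    {
        uint32_t lo = 0x80000000u, hi = 0x00000003u; /* pretend rd32() results */
        uint64_t hena = (uint64_t)lo | ((uint64_t)hi << 32);

        if (hena & ((uint64_t)1 << PCTYPE_NONF_IPV4_TCP))
            printf("IPv4/TCP: hash covers the L4 ports too\n");
        else
            printf("IPv4/TCP: hash on IP addresses only\n");
        return 0;
    }
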
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: buffer for rule locations (unused by this driver)
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->vsi_res->num_queue_pairs;
+ ret = 0;
break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
- | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ case ETHTOOL_GRXFH:
+ ret = i40evf_get_rss_hash_opts(adapter, cmd);
break;
default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
return -EINVAL;
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
}
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = adapter->q_vector[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
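
The set path is the mirror image of the query above: flip the PCTYPE bits
in the 64-bit word, then split it back into two 32-bit halves for the
register writes. A round-trip sketch, again with an assumed bit position:

    #include <assert.h>
    #include <stdint.h>

    #define PCTYPE_NONF_IPV6_UDP 41 /* assumed bit position */

    int main(void)
    {
        uint64_t hena = 0;

        hena |= (uint64_t)1 << PCTYPE_NONF_IPV6_UDP;    /* enable flow type */

        uint32_t lo = (uint32_t)hena;           /* -> I40E_VFQF_HENA(0) */
        uint32_t hi = (uint32_t)(hena >> 32);   /* -> I40E_VFQF_HENA(1) */

        assert((((uint64_t)hi << 32) | lo) == hena);    /* lossless split */

        hena &= ~((uint64_t)1 << PCTYPE_NONF_IPV6_UDP); /* disable again */
        assert(hena == 0);
        return 0;
    }
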
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_get_channels - get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
+ return 0;
+}
+
+/**
+ * i40evf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = indir[j++];
+ hlut_val |= indir[j++] << 8;
+ hlut_val |= indir[j++] << 16;
+ hlut_val |= indir[j++] << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
}
return 0;
}
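
The two RSS table helpers above share one packing rule: each 32-bit HLUT
register carries four one-byte queue indices, which is also why
get_rxfh_indir_size reports (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4. A
self-contained round trip of that byte packing:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t indir[4] = { 0, 1, 2, 3 };  /* queue ids, one byte each */
        uint8_t out[4];

        /* pack, i40evf_set_rxfh style */
        uint32_t hlut = (uint32_t)indir[0] |
                        ((uint32_t)indir[1] << 8) |
                        ((uint32_t)indir[2] << 16) |
                        ((uint32_t)indir[3] << 24);

        /* unpack, i40evf_get_rxfh style */
        out[0] = hlut & 0xff;
        out[1] = (hlut >> 8) & 0xff;
        out[2] = (hlut >> 16) & 0xff;
        out[3] = (hlut >> 24) & 0xff;

        for (int i = 0; i < 4; i++)
            assert(out[i] == indir[i]);
        return 0;
    }
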
-static struct ethtool_ops i40evf_ethtool_ops = {
+static const struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -378,6 +683,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_rxnfc = i40evf_get_rxnfc,
+ .set_rxnfc = i40evf_set_rxnfc,
+ .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+ .get_rxfh = i40evf_get_rxfh,
+ .set_rxfh = i40evf_set_rxfh,
+ .get_channels = i40evf_get_channels,
};
/**
@@ -389,5 +700,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
**/
void i40evf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+ netdev->ethtool_ops = &i40evf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..7fc5f3b5d6bf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -25,13 +28,15 @@
#include "i40e_prototype.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.16"
+#define DRV_VERSION "0.9.34"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -167,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -657,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
f = i40evf_find_vlan(adapter, vlan);
if (NULL == f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f) {
- dev_info(&adapter->pdev->dev,
- "%s: no memory for new VLAN filter\n",
- __func__);
+ if (NULL == f)
return NULL;
- }
+
f->vlan = vlan;
INIT_LIST_HEAD(&f->list);
@@ -688,7 +689,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- return;
}
/**
@@ -767,14 +767,12 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
if (NULL == f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (NULL == f) {
- dev_info(&adapter->pdev->dev,
- "%s: no memory for new filter\n", __func__);
clear_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section);
return NULL;
}
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
list_add(&f->list, &adapter->mac_filter_list);
f->add = true;
@@ -807,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
return (f == NULL) ? -ENOMEM : 0;
@@ -841,7 +838,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
bool found = false;
- if (f->macaddr[0] & 0x01) {
+ if (is_multicast_ether_addr(f->macaddr)) {
netdev_for_each_mc_addr(mca, netdev) {
if (ether_addr_equal(mca->addr, f->macaddr)) {
found = true;
@@ -970,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
+ if (adapter->state == __I40EVF_DOWN)
+ return;
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1027,30 +1027,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold) {
- dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
+ if (err < 0) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = -EIO;
- } else {
- /* Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NONQ_VECS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return err;
}
- return err;
+
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = err;
+ return 0;
}
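
pci_enable_msix_range() collapses the old try-again loop into one call: it
returns the number of vectors actually granted, somewhere in
[minvec, maxvec], or a negative errno when even minvec cannot be satisfied.
A toy user-space stand-in for that contract; the availability count is made
up for the example:

    #include <errno.h>
    #include <stdio.h>

    /* toy stand-in for pci_enable_msix_range() semantics */
    static int enable_msix_range(int available, int minvec, int maxvec)
    {
        if (available < minvec)
            return -ENOSPC;     /* can't meet the minimum */
        return available < maxvec ? available : maxvec;
    }

    int main(void)
    {
        /* asked for up to 8, need at least 2, hardware has 5 */
        int err = enable_msix_range(5, 2, 8);

        if (err < 0)
            printf("allocation failed: %d\n", err);
        else
            printf("got %d vectors\n", err); /* prints: got 5 vectors */
        return 0;
    }
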
/**
@@ -1096,14 +1087,14 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->queue_index = i;
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
- tx_ring->count = I40EVF_DEFAULT_TXD;
+ tx_ring->count = adapter->tx_desc_count;
adapter->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
- rx_ring->count = I40EVF_DEFAULT_RXD;
+ rx_ring->count = adapter->rx_desc_count;
adapter->rx_rings[i] = rx_ring;
}
@@ -1141,9 +1132,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter.
- */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) {
@@ -1183,7 +1171,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, 64);
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
}
@@ -1236,8 +1224,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-
- return;
}
/**
@@ -1309,7 +1295,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- dev_info(&adapter->pdev->dev, "Checking for redemption\n");
if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1340,8 +1325,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
- dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
- dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_pending = 0;
adapter->aq_required = 0;
@@ -1413,7 +1397,7 @@ restart_watchdog:
}
/**
- * i40evf_configure_rss - increment to next available tx queue
+ * next_queue - increment to next available tx queue
* @adapter: board private structure
* @j: queue counter
*
@@ -1504,15 +1488,12 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val != I40E_VFR_VFACTIVE) {
- dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+ if (rstat_val != I40E_VFR_VFACTIVE)
break;
- } else {
+ else
msleep(I40EVF_RESET_WAIT_MS);
- }
}
if (i == I40EVF_RESET_WAIT_COUNT) {
- dev_err(&adapter->pdev->dev, "Reset was not detected\n");
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
goto continue_reset; /* act like the reset happened */
}
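
Both waits in this function follow the same bounded-polling shape: sample a
status register, stop when it changes, otherwise sleep and retry, and treat
an exhausted retry budget as its own outcome. The same shape in miniature,
with a fake status source standing in for the rd32() of I40E_VFGEN_RSTAT:

    #include <stdio.h>

    #define WAIT_COUNT 10

    static int read_status(void)
    {
        static int calls;
        return ++calls >= 4;    /* pretend: status flips on the 4th poll */
    }

    int main(void)
    {
        int i;

        for (i = 0; i < WAIT_COUNT; i++) {
            if (read_status())
                break;
            /* msleep(I40EVF_RESET_WAIT_MS) in the driver */
        }
        if (i == WAIT_COUNT)
            printf("timed out; proceed as if reset happened\n");
        else
            printf("status changed after %d polls\n", i + 1);
        return 0;
    }
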
@@ -1521,22 +1502,24 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_VFACTIVE) {
- dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+ if (rstat_val == I40E_VFR_VFACTIVE)
break;
- } else {
+ else
msleep(I40EVF_RESET_WAIT_MS);
- }
}
if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
rstat_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev))
- i40evf_close(adapter->netdev);
-
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1591,7 +1574,7 @@ continue_reset:
}
return;
reset_err:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(adapter->netdev);
}
@@ -1607,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
i40e_status ret;
+ u32 val, oldval;
u16 pending;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1614,11 +1598,9 @@ static void i40evf_adminq_task(struct work_struct *work)
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
- if (!event.msg_buf) {
- dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
- __func__);
+ if (!event.msg_buf)
return;
- }
+
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1636,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
}
} while (pending);
+ /* check for error indications */
+ val = rd32(hw, hw->aq.arq.len);
+ oldval = val;
+ if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.arq.len, val);
+
+ val = rd32(hw, hw->aq.asq.len);
+ oldval = val;
+ if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.asq.len, val);
+
/* re-enable Admin queue interrupt cause */
i40evf_misc_irq_enable(adapter);
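
The new block above is a read/acknowledge/conditional-write pattern: read
the queue length register once, log and clear each error bit present, and
only write the value back if something changed. A compact sketch with
assumed mask values:

    #include <stdint.h>
    #include <stdio.h>

    #define ARQ_VFE  0x10000000u /* assumed mask values, for illustration */
    #define ARQ_OVFL 0x20000000u
    #define ARQ_CRIT 0x40000000u

    int main(void)
    {
        uint32_t val = 0x000000ffu | ARQ_OVFL; /* pretend register read */
        uint32_t oldval = val;

        if (val & ARQ_VFE)  { printf("VF error\n");       val &= ~ARQ_VFE; }
        if (val & ARQ_OVFL) { printf("overflow error\n"); val &= ~ARQ_OVFL; }
        if (val & ARQ_CRIT) { printf("critical error\n"); val &= ~ARQ_CRIT; }

        if (oldval != val)
            printf("write back 0x%08x\n", val); /* wr32() in the driver */
        return 0;
    }
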
@@ -1673,6 +1690,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = adapter->tx_desc_count;
err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
if (!err)
continue;
@@ -1700,6 +1718,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->count = adapter->rx_desc_count;
err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
if (!err)
continue;
@@ -1804,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN)
return 0;
- /* signal that we are down to the interrupt handler */
- adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
+ adapter->state = __I40EVF_DOWN;
i40evf_free_traffic_irqs(adapter);
i40evf_free_all_tx_resources(adapter);
@@ -1848,8 +1866,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
WARN_ON(in_interrupt());
- adapter->state = __I40EVF_RESETTING;
-
i40evf_down(adapter);
/* allocate transmit descriptors */
@@ -1872,7 +1888,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
return;
err_reinit:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -1967,7 +1983,7 @@ static void i40evf_init_task(struct work_struct *work)
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
err);
goto err;
}
@@ -1993,14 +2009,14 @@ static void i40evf_init_task(struct work_struct *work)
break;
case __I40EVF_INIT_VERSION_CHECK:
if (!i40evf_asq_done(hw)) {
- dev_err(&pdev->dev, "Admin queue command never completed.\n");
+ dev_err(&pdev->dev, "Admin queue command never completed\n");
goto err;
}
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
+ dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
err);
goto err;
}
@@ -2074,12 +2090,12 @@ static void i40evf_init_task(struct work_struct *work)
netdev->hw_features &= ~NETIF_F_RXCSUM;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2087,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
if (NULL == f)
goto err_sw_init;
- memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
@@ -2098,6 +2114,8 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
+ adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
if (err)
goto err_sw_init;
@@ -2114,8 +2132,10 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
- adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
adapter->vsi.netdev = adapter->netdev;
if (!adapter->netdev_registered) {
@@ -2128,7 +2148,7 @@ static void i40evf_init_task(struct work_struct *work)
netif_tx_stop_all_queues(netdev);
- dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
@@ -2152,12 +2172,11 @@ err_alloc:
err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
- return;
}
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e294f012647d..2dc0bac76717 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -216,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_ATOMIC);
- if (!vqci) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vqci)
return;
- }
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -232,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i]->count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+ vqpi->txq.headwb_enabled = 1;
+ vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
+ (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
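
The head-writeback address computed above places the hardware's
completed-descriptor index immediately after the descriptor ring, so it is
simply base + ring_len * descriptor size. A quick check of that arithmetic,
assuming the 16-byte i40e_tx_desc (two __le64 fields); the base address and
ring length are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define TX_DESC_SIZE 16u /* assumed sizeof(struct i40e_tx_desc) */

    int main(void)
    {
        uint64_t dma_ring_addr = 0x100000;  /* pretend DMA base */
        uint32_t ring_len = 512;            /* descriptors in the ring */

        uint64_t dma_headwb_addr = dma_ring_addr +
                                   (uint64_t)ring_len * TX_DESC_SIZE;

        printf("head writeback at 0x%llx\n",
               (unsigned long long)dma_headwb_addr); /* just past the ring */
        return 0;
    }
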
@@ -329,11 +333,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
(adapter->num_msix_vectors *
sizeof(struct i40e_virtchnl_vector_map));
vimi = kzalloc(len, GFP_ATOMIC);
- if (!vimi) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vimi)
return;
- }
vimi->num_vectors = adapter->num_msix_vectors;
/* Queue vectors first */
@@ -390,7 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -399,16 +400,14 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_ATOMIC);
- if (!veal) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!veal)
return;
- }
+
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
- memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
f->add = false;
}
@@ -454,7 +453,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -462,16 +461,14 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
veal = kzalloc(len, GFP_ATOMIC);
- if (!veal) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!veal)
return;
- }
+
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
- memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
list_del(&f->list);
kfree(f);
@@ -518,7 +515,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -526,11 +523,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vvfl)
return;
- }
+
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -580,7 +575,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -588,11 +583,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vvfl)
return;
- }
+
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -721,7 +714,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_opcode != adapter->current_op) {
- dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
__func__, adapter->current_op, v_opcode);
/* We're probably completely screwed at this point, but clear
* the current op and try to carry on....
@@ -730,7 +723,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
@@ -745,9 +738,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
stats->tx_broadcast;
adapter->net_stats.rx_bytes = stats->rx_bytes;
adapter->net_stats.tx_bytes = stats->tx_bytes;
- adapter->net_stats.rx_errors = stats->rx_errors;
adapter->net_stats.tx_errors = stats->tx_errors;
- adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.rx_dropped = stats->rx_discards;
adapter->net_stats.tx_dropped = stats->tx_discards;
adapter->current_stats = *stats;
}
@@ -781,7 +773,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
break;
default:
- dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
__func__, v_opcode);
break;
} /* switch v_opcode */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..a2db388cc31e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_82575
* e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -159,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
ret_val = igb_check_for_link_82575(hw);
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -526,7 +522,7 @@ out:
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
u32 link_mode = 0;
@@ -1008,7 +1004,6 @@ out:
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1032,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
data &= ~E1000_82580_PM_SPD;
}
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1052,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1077,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
}
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1180,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -1203,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = 0;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
if (hw->bus.func == 1)
@@ -1216,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
while (timeout) {
if (rd32(E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout)
@@ -1227,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_igp_3))
igb_phy_init_script_igp3(hw);
- return ret_val;
+ return 0;
}
/**
@@ -1269,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
+ &duplex);
/* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
@@ -1316,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
@@ -1411,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1436,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* set the completion timeout for interface */
ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
+ if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
hw_dbg("Masking off all interrupts\n");
wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = rd32(E1000_CTRL);
@@ -1622,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 data;
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1676,7 +1668,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ hw_dbg("NVM Read Error\n");
return ret_val;
}
@@ -1689,7 +1681,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1917,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1945,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
wr32(E1000_RCTL, temp_rctl);
wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
wrfl();
- msleep(2);
+ usleep_range(2000, 3000);
/* Enable RX queues that were previously enabled and restore our
* previous state
@@ -2005,14 +1997,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
* 16ms to 55ms
*/
ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2233,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
@@ -2259,7 +2251,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Add delay to insure DEV_RST has time to complete */
if (global_device_reset)
- msleep(5);
+ usleep_range(5000, 6000);
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
@@ -2436,8 +2428,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
@@ -2447,8 +2438,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
@@ -2525,7 +2515,7 @@ out:
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
if (ret_val)
@@ -2559,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
**/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 ipcnfg, eeer;
if ((hw->mac.type < e1000_i350) ||
@@ -2593,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
rd32(E1000_EEER);
out:
- return ret_val;
+ return 0;
}
/**
@@ -2720,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
**/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2738,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2762,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
E1000_I2C_THERMAL_SENSOR_ADDR,
&data->sensor[i].temp);
}
- return status;
+ return 0;
}
/**
@@ -2774,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
**/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2800,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2831,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
low_thresh_delta;
}
}
- return status;
+ return 0;
}
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 hdr_info; /* Split Head, buf len */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..2a8bb35c2df2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,39 +304,34 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* Timestamp in Rx buffer */
#define E1000_RXPBS_CFG_TS_EN 0x80000000
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -406,12 +398,12 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -467,7 +459,6 @@
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
-#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
@@ -1011,8 +1002,7 @@
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
#include "e1000_mbx.h"
struct e1000_mac_operations {
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
#ifdef CONFIG_IGB_HWMON
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
#define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..337161f440dd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_i210
* e1000_i211
@@ -100,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
return -E1000_ERR_NVM;
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -142,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
while (i < timeout) {
@@ -187,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ while (igb_get_hw_semaphore_i210(hw))
; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -210,7 +207,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -220,7 +217,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_read_nvm_eerd(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -228,7 +225,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -253,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, k, eewr = 0;
u32 attempts = 100000;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -275,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
for (k = 0; k < attempts; k++) {
if (E1000_NVM_RW_REG_DONE &
rd32(E1000_SRWR)) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
}
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
@@ -310,7 +307,7 @@ out:
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -320,7 +317,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -328,7 +325,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -367,12 +364,12 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
hw_dbg("Read INVM Word 0x%02x = %x\n",
address, *data);
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
}
- if (status != E1000_SUCCESS)
+ if (status)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
@@ -388,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
u16 words __always_unused, u16 *data)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
@@ -398,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
&data[1]);
ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
&data[2]);
- if (ret_val != E1000_SUCCESS)
+ if (ret_val)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_INIT_CTRL_4:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_1_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_0_2_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_ID_LED_SETTINGS:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = ID_LED_RESERVED_FFFF;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -488,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have first version location used */
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
version = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have odd version location
@@ -506,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
(i != 1))) {
version = (*next_record & E1000_INVM_VER_FIELD_TWO)
>> 13;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have even version location
@@ -515,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
- if (status == E1000_SUCCESS) {
+ if (!status) {
invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
>> E1000_INVM_MAJOR_SHIFT;
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -533,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have image type in first location used */
if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
invm_ver->invm_img_type = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have image type in first location used */
@@ -542,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
@@ -558,10 +556,10 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
**/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
@@ -593,7 +591,7 @@ static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
**/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;
@@ -602,12 +600,12 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
* EEPROM read fails
*/
ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("EEPROM read failed\n");
goto out;
}
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
@@ -625,7 +623,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
checksum = (u16) NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
@@ -654,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
@@ -687,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
**/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u32 flup;
ret_val = igb_pool_flash_update_done_i210(hw);
@@ -700,7 +698,7 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
wr32(E1000_EECD, flup);
ret_val = igb_pool_flash_update_done_i210(hw);
- if (ret_val == E1000_SUCCESS)
+ if (!ret_val)
hw_dbg("Flash update complete\n");
else
hw_dbg("Flash update time out\n");
@@ -753,7 +751,7 @@ out:
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
u8 dev_addr, u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
+ switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..e8280d0d7f02 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
@@ -801,5 +798,4 @@ etrack_id:
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
}
- return;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..c1bb64d8366f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = igb_wait_autoneg(hw);
if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
+ hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
void igb_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
void igb_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..7af4ffab0285 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
#define GS40G_LINE_LB 0x4000
/* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..1cc4b1a7e597 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
@@ -195,6 +192,10 @@
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
: (0x0E03C + ((_n) * 0x40)))
+
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -301,9 +302,9 @@
#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
+ (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +359,7 @@
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..c737d1f40838 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for igb */
@@ -144,6 +141,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
+ u32 speed;
status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -218,13 +216,13 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (status & E1000_STATUS_LU) {
if ((status & E1000_STATUS_2P5_SKU) &&
!(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->speed = SPEED_2500;
+ speed = SPEED_2500;
} else if (status & E1000_STATUS_SPEED_1000) {
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
} else if (status & E1000_STATUS_SPEED_100) {
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
} else {
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
@@ -232,9 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
+ ethtool_cmd_speed_set(ecmd, speed);
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
@@ -286,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -399,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
};
@@ -1170,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82575, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
@@ -1206,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
+
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1390,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
@@ -1412,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1459,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1481,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1503,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1515,7 +1518,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
if (adapter->flags & IGB_FLAG_HAS_MSIX)
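
The msleep(10) to usleep_range(10000, 11000) conversions repeated through this function follow the kernel's usual guidance for short delays: msleep() is jiffy based and can oversleep substantially at low HZ, while usleep_range() is hrtimer based and gives the scheduler an explicit window for coalescing wakeups. A sketch of the pattern, kernel build context assumed and not taken from this patch:

    #include <linux/delay.h>

    /* Sketch: settle for ~10 ms after masking interrupts; the extra
     * 1 ms of slack lets the wakeup be coalesced with other timers. */
    static void example_settle(void)
    {
            usleep_range(10000, 11000); /* min 10000 us, max 11000 us */
    }
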
@@ -1664,8 +1667,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
- (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
-
+ (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1949,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2413,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2425,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
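
The /* Fall through */ comments added in the two RSS hash switches document that each case is meant to accumulate into the cases below it, so readers and static checkers know the missing break is intentional. The same cascading idiom in a self-contained example with hypothetical flag values:

    #include <stdio.h>

    #define HASH_IP   0x1 /* hypothetical flags, for illustration only */
    #define HASH_PORT 0x2

    static unsigned int hash_fields(int flow_type)
    {
            unsigned int data = 0;

            switch (flow_type) {
            case 0:                 /* TCP-like: hash ports and addresses */
                    data |= HASH_PORT;
                    /* Fall through */
            case 1:                 /* UDP-like and below: addresses only */
                    data |= HASH_IP;
                    break;
            default:
                    break;
            }
            return data;
    }

    int main(void)
    {
            printf("tcp=%#x udp=%#x\n", hash_fields(0), hash_fields(1));
            return 0;
    }
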
@@ -2730,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -2740,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
/* Check whether we support SFF-8472 or not */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
@@ -2772,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 *dataword;
u16 first_word, last_word;
int i = 0;
@@ -2791,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS) {
+ if (status) {
/* Error occurred while reading module */
kfree(dataword);
return -EIO;
@@ -2824,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
@@ -2870,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3019,8 +3028,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
- .get_rxfh_indir = igb_get_rxfh_indir,
- .set_rxfh_indir = igb_set_rxfh_indir,
+ .get_rxfh = igb_get_rxfh,
+ .set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
@@ -3029,5 +3038,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
void igb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ netdev->ethtool_ops = &igb_ethtool_ops;
}
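
Two ethtool interface changes land in the hunks above: the RSS hooks are renamed from get_rxfh_indir/set_rxfh_indir to get_rxfh/set_rxfh and gain a hash-key argument, and SET_ETHTOOL_OPS(), which was only a wrapper around a pointer assignment, is replaced by writing netdev->ethtool_ops directly. A sketch of a get_rxfh implementation for a driver with no driver-private hash key; the table size and queue count are made-up values, kernel build context assumed:

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>

    #define EXAMPLE_RETA_SIZE 128 /* hypothetical indirection table size */

    static int example_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
    {
            int i;

            if (indir)
                    for (i = 0; i < EXAMPLE_RETA_SIZE; i++)
                            indir[i] = i % 4; /* spread over 4 queues */
            return 0; /* key left untouched: no driver-private hash key */
    }
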
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..f145adbb55ac 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
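
DEFINE_PCI_DEVICE_TABLE() was deprecated in favour of spelling out const struct pci_device_id, which is what the hunk above does. The minimal shape of such a table, kernel build context assumed (0x10C9 is a real 82576 device ID, used here purely as an example value):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_VDEVICE(INTEL, 0x10C9) },
            { } /* terminating all-zero entry is required */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);
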
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
+ pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
netdev->state, netdev->trans_start, netdev->last_rx);
}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ return !!(i2cctl & E1000_I2C_CLK_IN);
}
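
Both I2C getters above now use double negation: !!(x & mask) collapses any non-zero bit-test result to exactly 1, so the value is safe to store in a one-bit field or compare against 1. In plain C:

    #include <stdio.h>

    #define DATA_IN_BIT 0x1000 /* hypothetical register bit */

    int main(void)
    {
            unsigned int reg = 0x1008;

            /* (reg & DATA_IN_BIT) is 0x1000; !! normalizes it to 1. */
            printf("raw=%#x normalized=%d\n",
                   reg & DATA_IN_BIT, !!(reg & DATA_IN_BIT));
            return 0;
    }
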
static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
-
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
+ /* Fall through */
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
+ /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
+ /* On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
*/
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
+
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wrfl();
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
+
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
+
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
igb_irq_disable(adapter);
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
break;
}
- return;
}
/**
@@ -2203,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
- return E1000_SUCCESS;
+ return 0;
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
@@ -2437,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
+ /* configure RXPBSIZE and TXPBSIZE */
+ if (hw->mac.type == e1000_i210) {
+ wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+ wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ }
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2529,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
@@ -3077,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -3248,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
* Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -3389,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
- adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
u32 vtctl = rd32(E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -4058,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = rd32(E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -4077,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
if (adapter->wvbr & (1 << j) ||
adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4216,15 @@ static void igb_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
+
hw->mac.ops.get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ E1000_THSTAT_LINK_THROTTLE))
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -4277,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ netdev_err(netdev, "The network adapter was stopped because it overheated\n");
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
@@ -4344,6 +4348,7 @@ static void igb_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
@@ -4483,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
itrval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
itrval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4679,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
return;
} else {
u8 l4_hdr = 0;
+
switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
*/
if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
unsigned short f;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
} else {
@@ -5140,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -5621,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
for (i = 0; i < adapter->vfs_allocated_count; i++) {
u32 vmolr = rd32(E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5770,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
@@ -5798,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
@@ -5902,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
-
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
@@ -6494,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -6963,6 +6973,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return (total_packets < budget);
+ return total_packets < budget;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -7873,7 +7884,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
}
}
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -7882,15 +7894,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (hw->mac.type != e1000_82576)
return -EOPNOTSUPP;
+ if (min_tx_rate)
+ return -EINVAL;
+
actual_link_speed = igb_link_mbps(adapter->link_speed);
if ((vf >= adapter->vfs_allocated_count) ||
(!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
- (tx_rate < 0) || (tx_rate > actual_link_speed))
+ (max_tx_rate < 0) ||
+ (max_tx_rate > actual_link_speed))
return -EINVAL;
adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vf_data[vf].tx_rate = (u16)tx_rate;
- igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+ adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+ igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
return 0;
}
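
The .ndo_set_vf_tx_rate callback is replaced in this kernel cycle by .ndo_set_vf_rate, which carries both a minimum and a maximum rate; 82576 hardware can only enforce a cap, so the hunk above rejects any non-zero min_tx_rate. The general shape of such a handler, as a sketch with the hardware programming elided:

    #include <linux/netdevice.h>
    #include <linux/errno.h>

    static int example_set_vf_rate(struct net_device *dev, int vf,
                                   int min_tx_rate, int max_tx_rate)
    {
            if (min_tx_rate)
                    return -EINVAL; /* no minimum-rate guarantee in hardware */
            if (max_tx_rate < 0)
                    return -EINVAL;
            /* ... program the per-VF rate limiter with max_tx_rate ... */
            return 0;
    }
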
@@ -7919,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
- return E1000_SUCCESS;
+ return 0;
}
static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7930,7 +7946,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7955,11 +7972,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
@@ -8049,6 +8068,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
+
wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
wr32(E1000_DMACR, 0);
}
@@ -8077,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
swfw_mask = E1000_SWFW_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -8088,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_ERR_I2C;
else {
*data = status;
- return E1000_SUCCESS;
+ return 0;
}
}
@@ -8113,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (!this_client)
return E1000_ERR_I2C;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -8121,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (status)
return E1000_ERR_I2C;
else
- return E1000_SUCCESS;
+ return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab25e49365f7..794c139f0cc0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -360,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
+
/**
- * igb_ptp_set_ts_config - control hardware time stamping
- * @netdev:
- * @ifreq:
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
- **/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+ struct hwtstamp_config *config)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config *config = &adapter->tstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
bool is_l2 = false;
u32 regval;
- if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
- return -EFAULT;
-
/* reserved for future extensions */
if (config->flags)
return -EINVAL;
@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH);
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifreq: interface request containing the hwtstamp configuration
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igb_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
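
The refactor above separates user-space plumbing from hardware programming: the ioctl wrapper copies the request in, igb_ptp_set_timestamp_mode() validates and applies it, and the configuration is only saved once that succeeds, which also lets igb_ptp_reset() replay the saved mode without an ioctl. The copy-in/validate/commit/copy-out shape in isolation; apply_config() and save_config() are hypothetical stand-ins, kernel build context assumed:

    #include <linux/uaccess.h>
    #include <linux/netdevice.h>
    #include <linux/net_tstamp.h>

    static int example_set_config(struct net_device *netdev, struct ifreq *ifr)
    {
            struct hwtstamp_config config;
            int err;

            if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                    return -EFAULT;

            err = apply_config(netdev, &config); /* hypothetical: validate+apply */
            if (err)
                    return err; /* nothing is saved on failure */

            save_config(netdev, &config); /* hypothetical: persist for replay */

            return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                    -EFAULT : 0;
    }
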
@@ -745,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
adapter->cc.mask = CLOCKSOURCE_MASK(64);
adapter->cc.mult = 1;
@@ -765,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -784,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
adapter->ptp_caps.settime = igb_ptp_settime_i210;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
/* Enable the timer functions by clearing bit 31. */
wr32(E1000_TSAUXC, 0x0);
break;
@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
wr32(E1000_IMS, E1000_IMS_TS);
}
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
return;
/* reset the tstamp_config */
- memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
switch (adapter->hw.mac.type) {
case e1000_82576:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07943f4..2178f87e9f61 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
static void igbvf_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- return;
}
static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+ netdev->ethtool_ops = &igbvf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2f8e36..b311e9e710d2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
void ixgb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+ netdev->ethtool_ops = &ixgb_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..ac9f2148cdc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
struct vf_macvlans {
struct list_head l;
int vf;
- int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
@@ -363,7 +362,7 @@ struct ixgbe_ring_container {
for (pos = (head).ring; pos != NULL; pos = pos->next)
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- ? 8 : 1)
+ ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
/* MAX_Q_VECTORS of these are allocated,
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
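
The new ixgbe_mac_addr table above tracks each receive-address slot with a small state bitmask; DEFAULT, MODIFIED and IN_USE can be set and cleared independently. Exercising the same idea in plain C with mirrored flag values:

    #include <stdio.h>

    #define STATE_DEFAULT  0x1 /* mirrors the IXGBE_MAC_STATE_* values */
    #define STATE_MODIFIED 0x2
    #define STATE_IN_USE   0x4

    int main(void)
    {
            unsigned int state = 0;

            state |= STATE_DEFAULT | STATE_IN_USE; /* claim the slot */
            state |= STATE_MODIFIED;               /* dirty: needs a sync pass */

            if (state & STATE_MODIFIED)
                    printf("slot needs writing back (state=%#x)\n", state);

            state &= ~STATE_MODIFIED; /* clear after the hardware write */
            return 0;
    }
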
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
+ struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
@@ -941,6 +957,7 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
}
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..15609331ec17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -41,10 +41,10 @@
#define IXGBE_82598_RX_PB_SIZE 512
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
int i;
bool link_up;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
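
The validation added above only checks water marks for traffic classes that actually have Tx pause enabled and a high water mark configured; a zero or inverted low water mark on an active class would flood the link with XOFF frames. The check in isolation, plain C with hypothetical types:

    #include <stdio.h>

    #define MAX_TC 8

    struct fc_cfg {
            int tx_pause;              /* hypothetical: Tx flow control on? */
            unsigned int low[MAX_TC];  /* per-TC low water marks */
            unsigned int high[MAX_TC]; /* per-TC high water marks */
    };

    static int validate_watermarks(const struct fc_cfg *fc)
    {
            int i;

            for (i = 0; i < MAX_TC; i++) {
                    if (!fc->tx_pause || !fc->high[i])
                            continue; /* class unused: nothing to check */
                    if (!fc->low[i] || fc->low[i] >= fc->high[i])
                            return -1; /* would flood XOFF or never send XON */
            }
            return 0;
    }

    int main(void)
    {
            struct fc_cfg fc = { .tx_pause = 1, .low = { 10 }, .high = { 20 } };

            printf("valid=%d\n", validate_watermarks(&fc) == 0);
            return 0;
    }
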
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete)
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
{
u32 links_reg;
u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ &adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
msleep(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- MDIO_MMD_PMAPMD,
- &link_reg);
+ MDIO_MMD_PMAPMD,
+ &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ MDIO_MMD_PMAPMD,
+ &adapt_comp_reg);
}
} else {
if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
/* Enable Tx Atlas so packets can be transmitted again */
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- analog_val);
+ analog_val);
}
/* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
+ 0);
return 0;
}
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 atlas_ctl;
IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
- return;
}
static struct ixgbe_mac_operations mac_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f32b3dd1ba8e..bc7c924240a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
!ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ &ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
+ &ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset = NULL;
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto setup_sfp_out;
/* PHY config will finish before releasing the semaphore */
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
+ ixgbe_link_speed *speed,
bool *autoneg)
{
s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed,
- autoneg_wait_to_complete);
+ highest_link_speed,
+ autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
/* Switch from 1G SFI to 10G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
(pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -1074,14 +1074,14 @@ out:
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -1224,7 +1224,7 @@ mac_reset_top:
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_AUTOC2_UPPER_MASK);
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
}
}
@@ -1246,7 +1246,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
/*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, "
- "aborting table re-initialization.\n");
+ hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* - write 0 to bit 8 of FDIRCMD register
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
- IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- ~IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
/*
* Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
/* Poll init-done after we write FDIRCTRL register */
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1453,7 +1453,7 @@ do { \
bucket_hash ^= hi_hash_dword >> n; \
else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
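The semicolon dropped from the signature-hash macro above (and from the bucket-hash macro further below) is more than style: a do { ... } while (0) macro is meant to be invoked like a statement, with the caller supplying the terminating semicolon. Baking the semicolon into the definition leaves a stray empty statement that detaches a following else. A minimal sketch with a hypothetical SWAP macro:

#define SWAP_BAD(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0);
#define SWAP_OK(a, b)  do { int t = (a); (a) = (b); (b) = t; } while (0)

void demo(int *x, int *y, int cond)
{
	if (cond)
		SWAP_OK(*x, *y);	/* expands to "... while (0);": one statement */
	else
		SWAP_OK(*y, *x);	/* with SWAP_BAD, the macro's built-in ';'
					 * would end the if body early and this
					 * else would fail to compile */
}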
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
{
u64 fdirhashcmd;
u32 fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1579,7 +1579,7 @@ do { \
bucket_hash ^= lo_hash_dword >> n; \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
u32 mask = ntohs(input_mask->formatted.dst_port);
+
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
mask |= ntohs(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 core_ctl;
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
- (reg << 8));
+ (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..4e5385a2a465 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count);
+ u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
/* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw);
+ ret_val = ixgbe_setup_fc(hw);
+ if (!ret_val)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return 0;
+out:
+ return ret_val;
}
/**
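Note that the early exit above is taken when ixgbe_setup_fc() succeeds (ret_val == 0): the success path returns before hw->adapter_stopped is cleared, while only a failure falls through to clear it. Later kernels test the opposite condition, if (ret_val), to bail out only on error; if propagating failures is the intent here, the check reads inverted.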
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
* Reads the part number string from the EEPROM.
**/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
eeprom->address_bits = 16;
else
eeprom->address_bits = 8;
- hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
+ eeprom->type, eeprom->word_size, eeprom->address_bits);
}
return 0;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
}
if (i == timeout) {
- hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
- "not granted.\n");
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
/*
* this release is particularly important because our attempts
* above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
- hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
- "not granted.\n");
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
+ IXGBE_EEPROM_OPCODE_BITS);
spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
* @count: number of bits to shift out
**/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count)
+ u16 count)
{
u32 eec;
u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* caller does not need checksum_val, the value can be NULL.
**/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val)
+ u16 *checksum_val)
{
s32 status;
u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Puts an ethernet address into a receive address register.
**/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
u32 rar_low, rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
- hw->mac.mc_filter_type);
+ hw->mac.mc_filter_type);
return 0;
}
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
u32 fcrtl, fcrth;
int i;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/* Negotiate the fc mode to use */
ixgbe_fc_autoneg(hw);
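The replacement validates water marks per traffic class rather than only for packet buffer 0: a class is checked only when transmit pause is active and a high mark is configured, and its low mark must then be non-zero and strictly below the high mark (a zero low mark floods the link with XOFF frames, per the comment above). A hypothetical predicate restating the rule:

/* Hypothetical predicate; mirrors the per-TC check above. */
static bool fc_tc_marks_valid(struct ixgbe_hw *hw, int tc)
{
	if (!(hw->fc.current_mode & ixgbe_fc_tx_pause) ||
	    !hw->fc.high_water[tc])
		return true;	/* pause unused for this TC */

	return hw->fc.low_water[tc] &&
	       hw->fc.low_water[tc] < hw->fc.high_water[tc];
}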
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
} else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
/* For informational purposes only */
if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
return 0;
@@ -2782,7 +2786,7 @@ out:
* get and set mac_addr routines.
**/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+ u16 *san_mac_offset)
{
s32 ret_val;
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
hw->mac.ops.set_lan_id(hw);
/* apply the port offset to the address offset */
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
&san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
s32 regindex;
u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* Ignore it. */
vfta_changed = false;
}
- }
- else
+ } else {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
}
if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..2ae5d4b8fc93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
+ u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
+ u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
+ u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
+ u16 *wwpn_prefix);
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
return unlikely(!addr);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
}
#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
- u32 value;
-
- if (ixgbe_removed(reg_addr))
- return IXGBE_FAILED_READ_REG;
- value = readl(reg_addr + reg);
- if (unlikely(value == IXGBE_FAILED_READ_REG))
- ixgbe_check_remove(hw, reg);
- return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e000131b..a689ee0d4bed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
for (i = 0; i < MAX_USER_PRIORITY; i++)
map[i] = IXGBE_RTRUP2TC_UP_MASK &
(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
- return;
}
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
continue;
}
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
max_tc = prio_tc[i];
}
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d5a1e3db0774..90c370230e20 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -31,17 +31,17 @@
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
- * 1 WSP - Weighted Strict Priority
- */
+ * 1 WSP - Weighted Strict Priority
+ */
#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
- * 1 WRR - Weighted Round Robin
- */
+ * 1 WRR - Weighted Round Robin
+ */
#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
- * clear!
- */
+ * clear!
+ */
#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
/* Receive UP2TC mapping */
@@ -56,11 +56,11 @@
#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
- * buffers enable
- */
+ * buffers enable
+ */
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
- * (RSS) enable
- */
+ * (RSS) enable
+ */
/* RTRPCS Bit Masks */
#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
/* RTTPCS Bit Masks */
#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
- * 1 SP - Strict Priority
- */
+ * 1 SP - Strict Priority
+ */
#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RTTPCS_ARBD_SHIFT 22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1ef27f..5172b6b12c09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
- u8 setting)
+ u8 setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
}
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
- u8 *setting)
+ u8 *setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 472b0f450bf9..5e2c1e35e517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
- if (adapter->ixgbe_dbg_adapter)
- debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+ debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
adapter->ixgbe_dbg_adapter = NULL;
}
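This guard could be dropped because debugfs_remove_recursive() already returns immediately when handed a NULL (or error) dentry, so the caller-side NULL test duplicated a check the debugfs API performs itself.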
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6c55c14d082a..a452730a3278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
- IXGBE_PB_STATS_LEN + \
- IXGBE_QUEUE_STATS_LEN)
+ IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -161,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
- /* SFP type is needed for get_link_capabilities */
- if (hw->phy.media_type & (ixgbe_media_type_fiber |
- ixgbe_media_type_fiber_qsfp)) {
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- hw->phy.ops.identify_sfp(hw);
- }
-
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -303,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
}
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -368,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +443,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ struct ethtool_regs *regs, void *p)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
}
static int ixgbe_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +911,7 @@ err:
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 nvm_track_id;
@@ -940,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
@@ -1082,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct rtnl_link_stats64 temp;
@@ -1110,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
@@ -1180,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+ u8 *data)
{
char *p = (char *)data;
int i;
@@ -1357,8 +1350,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
- e_err(drv, "pattern test reg %04X failed: got "
- "0x%08X expected 0x%08X\n",
+ e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1374,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
ixgbe_write_reg(&adapter->hw, reg, write & mask);
val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err(drv, "set/check reg %04X test failed: got 0x%08X "
- "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
ixgbe_write_reg(&adapter->hw, reg, before);
return true;
@@ -1430,8 +1422,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
- e_err(drv, "failed STATUS register test got: 0x%08X "
- "expected: 0x%08X\n", after, value);
+ e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
@@ -1533,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
return -1;
}
} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
shared_int = false;
} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1563,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1587,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
- if (!(adapter->test_icr &mask)) {
+ if (!(adapter->test_icr & mask)) {
*data = 4;
break;
}
@@ -1602,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1964,7 +1956,7 @@ out:
}
static void ixgbe_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
@@ -1987,10 +1979,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
int i;
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].clear_to_send) {
- netdev_warn(netdev, "%s",
- "offline diagnostic is not "
- "supported when VFs are "
- "present\n");
+ netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
data[0] = 1;
data[1] = 1;
data[2] = 1;
@@ -2037,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
* loopback diagnostic. */
if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED)) {
- e_info(hw, "Skip MAC loopback diagnostic in VT "
- "mode\n");
+ e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
data[3] = 0;
goto skip_loopback;
}
@@ -2078,7 +2066,7 @@ skip_ol_tests:
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_hw *hw = &adapter->hw;
int retval = 0;
@@ -2094,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
}
static void ixgbe_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
}
static int ixgbe_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2222,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- e_info(probe, "rx-usecs value high enough "
- "to re-enable RSC\n");
+ e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
return true;
}
/* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2433,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2787,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
- e_warn(drv, "enabling UDP RSS: fragmented packets"
- " may arrive out of order to the stack above\n");
+ e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags2 = flags2;
@@ -3099,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
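The SET_ETHTOOL_OPS() wrapper was removed from the kernel in the 3.16 cycle once all drivers had been converted; assigning netdev->ethtool_ops directly, as above, is its one-line replacement.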
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
u8 up;
-#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2067d392cc3d..2d9451e39686 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
err = pci_enable_msi(adapter->pdev);
if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
+ "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+ err);
return;
}
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c047c3ef8d71..f5aa3311ea28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
ixgbe_service_event_schedule(adapter);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u32 value;
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns: value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming that any read returning all ones is genuine:
+ * the status register is read again, and only if that too reads all
+ * ones is the device treated as removed. If a removal was previously
+ * detected, the function avoids touching the hardware and returns
+ * IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
u16 value;
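With the read path centralized here, a surprise-removed adapter surfaces to every caller as an all-ones return value. A hedged sketch of the consumer side; example_read_links is hypothetical, while IXGBE_LINKS and IXGBE_FAILED_READ_REG are existing driver defines:

/* Hypothetical caller: detect removal from the sentinel value. */
static int example_read_links(struct ixgbe_hw *hw, u32 *links)
{
	*links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (*links == IXGBE_FAILED_READ_REG)
		return -ENODEV;	/* device gone: all reads are all ones */
	return 0;
}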
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
* ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
* @adapter: driver data
*/
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ else
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+ ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+ return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+/* this function overwrites the first RAR entry with the default filter */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].queue = VMDQ_P(0);
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
- /* return error if we do not support writing to RAR table */
- if (!hw->mac.ops.set_rar)
- return -ENOMEM;
-
netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- VMDQ_P(0), IXGBE_RAH_AV);
+ ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--)
- hw->mac.ops.clear_rar(hw, rar_entries);
-
return count;
}
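Note the delete-then-add pair in the loop above: ixgbe_add_mac_filter() does not check for duplicates, so removing an address before re-inserting it keeps the sync idempotent across repeated calls. A hedged usage sketch; example_claim_filter is hypothetical and the VMDQ_P(0) pool argument is illustrative:

/* Hypothetical caller of the software MAC-table API. */
static int example_claim_filter(struct ixgbe_adapter *adapter, u8 *mac)
{
	int idx = ixgbe_add_mac_filter(adapter, mac, VMDQ_P(0));

	if (idx < 0)
		return idx;	/* -EINVAL: zero address, -ENOMEM: table full */

	/* ... use the filter, then release its RAR slot ... */
	return ixgbe_del_mac_filter(adapter, mac, VMDQ_P(0));
}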
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
-
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
/* Only disable hardware filter vlans in promiscuous mode
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED)))
- ixgbe_vlan_filter_disable(adapter);
- else
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
}
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev);
+ count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
-
- if (adapter->num_vfs)
- ixgbe_restore_vf_multicasts(adapter);
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
*/
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
/* Calculate max LAN frame size */
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
if (!num_tc)
num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water > hw->fc.high_water[i])
- hw->fc.low_water = 0;
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
}
+
+ for (; i < MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
vmolr |= IXGBE_VMOLR_ROMPE;
hw->mac.ops.update_mc_addr_list(hw, dev);
}
- ixgbe_write_uc_addr_list(adapter->netdev);
+ ixgbe_write_uc_addr_list(adapter->netdev, pool);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 pool)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int entry;
-
- entry = hw->mac.num_rar_entries - pool;
- hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4521,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_qsfp_active_unknown:
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
+ /* ixgbe_phy_none is set when no SFP module is present */
+ case ixgbe_phy_none:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4742,7 +4879,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
int err;
+ u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr))
return;
@@ -4778,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ ixgbe_flush_sw_mac_table(adapter);
+ ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5517,6 +5661,17 @@ err_setup_tx:
return err;
}
+static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
+{
+ ixgbe_ptp_suspend(adapter);
+
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+}
+
/**
* ixgbe_close - Disables a network interface
* @netdev: network interface device structure
@@ -5534,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
-
ixgbe_release_hw_control(adapter);
return 0;
@@ -5608,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
rtnl_lock();
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_close_suspend(adapter);
rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
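Both ixgbe_close() and this shutdown/suspend path now funnel through the new ixgbe_close_suspend() helper, keeping the down/free-IRQ/free-rings teardown in one place. The suspend path holds the rtnl lock around the call, matching the locking ixgbe_close() inherits from the networking core.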
@@ -5945,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->state));
+ &(adapter->tx_ring[i]->state));
/* re-enable flow director interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
} else {
@@ -7172,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
- return 0;
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+ return ret > 0 ? 0 : ret;
}
static int
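ixgbe_add_mac_filter() returns the occupied table index on success, so the final statement above maps any non-negative index to 0, the expected ndo_set_mac_address return value, while passing negative errno-style results through unchanged.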
@@ -7783,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_do_ioctl = ixgbe_ioctl,
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
@@ -8187,6 +8335,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -8242,7 +8392,7 @@ skip_sriov:
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
+ part_str);
else
e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, part_str);
@@ -8304,8 +8454,8 @@ skip_sriov:
ixgbe_dbg_adapter_init(adapter);
- /* Need link setup for MNG FW, else wait for IXGBE_UP */
- if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
+ /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
+ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw,
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
@@ -8319,6 +8469,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->mac_table);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8392,6 +8543,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index f5c6af2b891b..1918e0abf734 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -223,7 +223,7 @@ out:
* received an ack to that message within delay * timeout period
**/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -407,7 +407,7 @@ out_no_write:
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a9b9ad69ed0e..a5cb755de3a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -54,11 +54,11 @@
* Message ACK's are the value or'd with 0xF0000000
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
+ * this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
+ * this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
+ clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index a76af8e28a04..ff68b7a9deff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
+ ixgbe_get_phy_type_from_id(hw->phy.id);
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
- &phy_id_high);
+ &phy_id_high);
if (status == 0) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
- &phy_id_low);
+ &phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
+ u32 device_type, u16 *phy_data)
{
s32 status;
u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @phy_data: Data to write to the PHY register
**/
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+ u32 device_type, u16 phy_data)
{
s32 status;
u16 gssr;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
/*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the link capabilities by reading the AUTOC register.
*/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_ERR_LINK_SETUP;
u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*autoneg = true;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
+ &speed_ability);
if (status == 0) {
if (speed_ability & MDIO_SPEED_10G)
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* reset the PHY and poll for completion */
hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- (phy_data | MDIO_CTRL1_RESET));
+ (phy_data | MDIO_CTRL1_RESET));
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- &phy_data);
+ &phy_data);
if ((phy_data & MDIO_CTRL1_RESET) == 0)
break;
usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* Get init offsets */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto out;
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
- MDIO_MMD_PMAPMD, eword);
+ MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
phy_offset);
data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core0;
+ ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core1;
+ ixgbe_sfp_type_da_cu_core1;
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
+ ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
+ ixgbe_sfp_type_srlr_core1;
} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
if (status != 0)
goto err_read_i2c_eeprom;
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
* so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
+ u16 *list_offset,
+ u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data)
+ u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* a specified device address.
**/
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
s32 status = 0;
u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
* a specified device address.
**/
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
s32 status = 0;
u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
/* Check that the LASI temp alarm status was triggered */
hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
+ MDIO_MMD_PMAPMD, &phy_data);
if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 0bb047f751c2..54071ed17e3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
+ u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
+ u32 device_type, u16 phy_data);
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+ ixgbe_link_speed *speed,
+ bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
+ ixgbe_link_speed *speed,
+ bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
+ u16 *list_offset,
+ u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data);
+ u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 8902ae683457..68f87ecb8a76 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,7 +26,6 @@
*******************************************************************************/
#include "ixgbe.h"
-#include <linux/export.h>
#include <linux/ptp_classify.h>
/*
@@ -334,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * ixgbe_ptp_enable
+ * ixgbe_ptp_feature_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
@@ -342,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
*/
-static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -570,9 +569,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
}
/**
- * ixgbe_ptp_set_ts_config - control hardware time stamping
- * @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private ixgbe adapter structure
+ * @config: the hwtstamp configuration requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -590,25 +589,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* packets, regardless of the type specified in the register, only use V2
* Event mode. This more accurately tells the user what the hardware is going
* to do anyway.
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
*/
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ struct hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -617,7 +616,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
@@ -641,7 +640,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
@@ -652,7 +651,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -671,7 +670,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
else
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
-
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -693,6 +691,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ *
+ * Set hardware to requested mode. If unsupported, return an error with no
+ * changes. Otherwise, store the mode for future reference.
+ */
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
/* save these settings for future reference */
memcpy(&adapter->tstamp_config, &config,
sizeof(adapter->tstamp_config));
@@ -790,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
- * When the MAC resets, all timesync features are reset. This function should be
- * called to re-enable the PTP clock structure. It will re-init the timecounter
- * structure based on the kernel time as well as setup the cycle counter data.
+ * When the MAC resets, all the hardware bits for timesync are reset. This
+ * function is used to re-enable the device for PTP based on current settings.
+ * We do lose the current clock time, so just reset the cyclecounter to the
+ * system real clock time.
+ *
+ * This function will maintain hwtstamp_config settings, and resets the SDP
+ * output if it was enabled.
*/
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
{
@@ -804,8 +829,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
- /* Reset the saved tstamp_config */
- memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+ /* reset the hardware timestamping mode */
+ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
ixgbe_ptp_start_cyclecounter(adapter);
@@ -825,16 +850,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_init
+ * ixgbe_ptp_create_clock
* @adapter: the ixgbe private adapter structure
*
- * This function performs the required steps for enabling ptp
- * support. If ptp support has already been loaded it simply calls the
- * cyclecounter init routine and exits.
+ * This function performs setup of the user entry point function table and
+ * initializes the PTP clock device, which is used to access the clock-like
+ * features of the PTP core. It will be called by ixgbe_ptp_init, only if
+ * there isn't already a clock device (such as after a suspend/resume cycle,
+ * where the clock device wasn't destroyed).
*/
-void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+ return 0;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
@@ -851,7 +883,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
case ixgbe_mac_82599EB:
snprintf(adapter->ptp_caps.name,
@@ -867,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
default:
adapter->ptp_clock = NULL;
- return;
+ return -EOPNOTSUPP;
}
- spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
+ err = PTR_ERR(adapter->ptp_clock);
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
+ return err;
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ /* set default timestamp mode to disabled here. We do this in
+ * create_clock instead of init, because we don't want to override the
+ * previous settings during a resume cycle.
+ */
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_init
+ * @adapter: the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling PTP
+ * support. If PTP support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+ /* initialize the spin lock first since we can't control when a user
+ * will call the entry functions once we have initialized the clock
+ * device
+ */
+ spin_lock_init(&adapter->tmreg_lock);
+
+ /* obtain a PTP device, or re-use an existing device */
+ if (ixgbe_ptp_create_clock(adapter))
+ return;
+
+ /* we have a clock so we can initialize work now */
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
+
+ /* reset the PTP related hardware bits */
ixgbe_ptp_reset(adapter);
/* enter the IXGBE_PTP_RUNNING state */
@@ -894,28 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_stop - disable ptp device and stop the overflow check
- * @adapter: pointer to adapter struct
+ * ixgbe_ptp_suspend - stop PTP work items
+ * @adapter: pointer to adapter struct
*
- * this function stops the ptp support, and cancels the delayed work.
+ * this function suspends PTP activity, and prevents more PTP work from being
+ * generated, but does not destroy the PTP clock device.
*/
-void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
{
/* Leave the IXGBE_PTP_RUNNING state. */
if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
return;
- /* stop the PPS signal */
- adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
- ixgbe_ptp_setup_sdp(adapter);
+ /* since this might be called in suspend, we don't clear the state,
+ * but simply reset the auxiliary PPS signal control register
+ */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
+ /* ensure that we cancel any pending PTP Tx work item in progress */
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * ixgbe_ptp_stop - close the PTP device
+ * @adapter: pointer to adapter struct
+ *
+ * completely destroy the PTP device, should only be called when the device is
+ * being fully closed.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+ /* first, suspend PTP activity */
+ ixgbe_ptp_suspend(adapter);
+ /* disable the PTP clock device */
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
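
The refactoring above separates the copy_from_user()/ifreq handling in ixgbe_ptp_set_ts_config() from the hardware programming in ixgbe_ptp_set_timestamp_mode(), so the stored mode can be replayed from ixgbe_ptp_reset() without a user request. That driver path is exercised from userspace through the standard SIOCSHWTSTAMP ioctl; a minimal sketch follows (the interface name "eth0" is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Request PTP V2 event RX timestamping on "eth0" (hypothetical name). */
int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* the driver may have widened the filter; report what it chose */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}

ixgbe copies the (possibly generalized) config back into the same user buffer, which is why cfg can be printed after the ioctl returns.
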
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..16b3a1cd9db6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
return 0;
}
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (!entry->free)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
}
/* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
+ ixgbe_full_sync_mac_table(adapter);
}
+#endif
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
+ vmolr |= IXGBE_VMOLR_BAM;
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
u8 num_tcs = netdev_get_num_tc(adapter->netdev);
/* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- hw->mac.ops.clear_rar(hw, rar_entry);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
return 0;
}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ ixgbe_del_mac_filter(adapter,
+ entry->vf_macvlan, vf);
}
}
}
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, mac_addr, vf);
return 0;
}
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
adapter->vfinfo[vf].vlan_count--;
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
- }
+ }
out:
- return err;
+ return err;
}
static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
}
}
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (link_speed != 10000)
return -EINVAL;
+ if (min_tx_rate)
+ return -EINVAL;
+
/* rate limit cannot be less than 10Mbs or greater than link speed */
- if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
return -EINVAL;
/* store values */
adapter->vf_rate_link_speed = link_speed;
- adapter->vfinfo[vf].tx_rate = tx_rate;
+ adapter->vfinfo[vf].tx_rate = max_tx_rate;
/* update hardware configuration */
ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
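
With the ndo_set_vf_bw() signature change above, the VF rate-limit callback now receives both a minimum and a maximum rate in Mb/s; since this hardware only enforces a ceiling, any nonzero minimum is rejected. A sketch of the same contract in a hypothetical driver (the foo_* names are placeholders, not kernel APIs):

#include <linux/errno.h>
#include <linux/netdevice.h>

/* hypothetical helper: hardware programming would go here */
static int foo_set_vf_rate_limit(struct net_device *netdev, int vf, int rate)
{
	return 0;
}

/*
 * Minimal sketch of the updated ndo_set_vf_bw contract, modeled on the
 * ixgbe version above: rates are in Mb/s, 0 means "unlimited", and a
 * driver without minimum-rate hardware must refuse a nonzero minimum.
 */
static int foo_ndo_set_vf_bw(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	int link_speed = 10000;		/* assume a 10G link for the sketch */

	if (min_tx_rate)		/* no minimum-rate support */
		return -EINVAL;

	/* limit must be absent, or between 10 Mb/s and the link speed */
	if (max_tx_rate && (max_tx_rate <= 10 || max_tx_rate > link_speed))
		return -EINVAL;

	return foo_set_vf_rate_limit(netdev, vf, max_tx_rate);
}
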
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..32c26d586c01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..9a89f98b35f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MAX_EITR 0x00000FF8
#define IXGBE_MIN_EITR 8
#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
+ (0x012300 + (((_i) - 24) * 4)))
#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
#define IXGBE_EITR_LLI_MOD 0x00008000
#define IXGBE_EITR_CNT_WDIS 0x80000000
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D014 + (n-64)*0x40
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
(0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D00C + (n-64)*0x40
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
- /* 8 of these 0x03C00 - 0x03C1C */
+ /* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_RXCTRL 0x03000
#define IXGBE_DROPEN 0x03D04
#define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
+ (0x0A200 + ((_i) * 8)))
#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
+ (0x0A204 + ((_i) * 8)))
#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
/* Packet split receive type */
#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
- (0x08600 + ((_i) * 4)))
+ (0x08600 + ((_i) * 4)))
#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
- IXGBE_GCR_EXT_VT_MODE_64)
+ IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_LSC | \
- IXGBE_EIMS_TCP_TIMER | \
- IXGBE_EIMS_OTHER)
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXD_ERR_CE | \
- IXGBE_RXD_ERR_LE | \
- IXGBE_RXD_ERR_PE | \
- IXGBE_RXD_ERR_OSE | \
- IXGBE_RXD_ERR_USE)
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXDADV_ERR_CE | \
- IXGBE_RXDADV_ERR_LE | \
- IXGBE_RXDADV_ERR_PE | \
- IXGBE_RXDADV_ERR_OSE | \
- IXGBE_RXDADV_ERR_USE)
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_10GB_FULL)
#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
- IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
+ u32 *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
+ bool *);
/* Packet Buffer Manipulation */
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 188a5974b85c..40dd798e1290 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
}
/**
@@ -155,7 +155,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
udelay(50);
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_X540,
.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540,
+ &ixgbe_get_supported_physical_layer_X540,
.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1baecb60f065..d420f124633f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -135,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+ netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de2793b06305..75467f83772c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b7b8d74c22d9..b151a949f352 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -42,6 +42,7 @@
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
@@ -179,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
* Misc definitions.
*/
#define DEFAULT_RX_QUEUE_SIZE 128
-#define DEFAULT_TX_QUEUE_SIZE 256
+#define DEFAULT_TX_QUEUE_SIZE 512
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
/*
* RX/TX descriptors.
*/
@@ -250,6 +259,7 @@ struct tx_desc {
#define GEN_TCP_UDP_CHECKSUM 0x00020000
#define UDP_FRAME 0x00010000
#define MAC_HDR_EXTRA_4_BYTES 0x00008000
+#define GEN_TCP_UDP_CHK_FULL 0x00000400
#define MAC_HDR_EXTRA_8_BYTES 0x00000200
#define TX_IHL_SHIFT 11
@@ -345,6 +355,12 @@ struct tx_queue {
int tx_curr_desc;
int tx_used_desc;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
+
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+
struct tx_desc *tx_desc_area;
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -491,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
if (netif_tx_queue_stopped(nq)) {
__netif_tx_lock(nq, smp_processor_id());
- if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+ if (txq->tx_desc_count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
__netif_tx_unlock(nq);
}
@@ -661,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+ return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+ u16 *l4i_chk, u32 *command, int length)
+{
+ int ret;
+ u32 cmd = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdr_len;
+ int tag_bytes;
+
+ BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_8021Q));
+
+ hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ tag_bytes = hdr_len - ETH_HLEN;
+
+ if (length - hdr_len > mp->shared->tx_csum_limit ||
+ unlikely(tag_bytes & ~12)) {
+ ret = skb_checksum_help(skb);
+ if (!ret)
+ goto no_csum;
+ return ret;
+ }
+
+ if (tag_bytes & 4)
+ cmd |= MAC_HDR_EXTRA_4_BYTES;
+ if (tag_bytes & 8)
+ cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+ cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+ GEN_IP_V4_CHECKSUM |
+ ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+ /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+ * it seems we don't need to pass the initial checksum. */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ cmd |= UDP_FRAME;
+ *l4i_chk = 0;
+ break;
+ case IPPROTO_TCP:
+ *l4i_chk = 0;
+ break;
+ default:
+ WARN(1, "protocol not supported");
+ }
+ } else {
+no_csum:
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
+ cmd |= 5 << TX_IHL_SHIFT;
+ }
+ *command = cmd;
+ return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+ struct sk_buff *skb, char *data, int length,
+ bool last_tcp, bool is_last)
+{
+ int tx_index;
+ u32 cmd_sts;
+ struct tx_desc *desc;
+
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+
+ desc->l4i_chk = 0;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
+
+ cmd_sts = BUFFER_OWNED_BY_DMA;
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+ /* last descriptor in SKB */
+ if (is_last)
+ cmd_sts |= TX_ENABLE_INTERRUPT;
+ }
+ desc->cmd_sts = cmd_sts;
+ return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tx_index;
+ struct tx_desc *desc;
+ int ret;
+ u32 cmd_csum = 0;
+ u16 l4i_chk = 0;
+
+ tx_index = txq->tx_curr_desc;
+ desc = &txq->tx_desc_area[tx_index];
+
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+ if (ret)
+ WARN(1, "failed to prepare checksum!");
+
+ /* Should we set this? Can't use the value from skb_tx_csum()
+ * as it's not the correct initial L4 checksum to use. */
+ desc->l4i_chk = 0;
+
+ desc->byte_cnt = hdr_len;
+ desc->buf_ptr = txq->tso_hdrs_dma +
+ txq->tx_curr_desc * TSO_HEADER_SIZE;
+ desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ GEN_CRC;
+
+ txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int total_len, data_left, ret;
+ int desc_count = 0;
+ struct tso_t tso;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ /* Count needed descriptors */
+ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+ netdev_dbg(dev, "not enough descriptors for TSO!\n");
+ return -EBUSY;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ txq_put_hdr_tso(skb, txq, data_left);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+ ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+ size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ __skb_queue_tail(&txq->tx_skb, skb);
+ skb_tx_timestamp(skb);
+
+ /* clear TX_END status */
+ mp->work_tx_end &= ~(1 << txq->index);
+
+ /* ensure all descriptors are written before poking hardware */
+ wmb();
+ txq_enable(txq);
+ txq->tx_desc_count += desc_count;
+ return 0;
+err_release:
+ /* TODO: Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ return ret;
+}
+
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -671,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
+ void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
+ addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -692,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
- this_frag, 0,
- skb_frag_size(this_frag),
- DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
+ desc->byte_cnt, DMA_TO_DEVICE);
}
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
-static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -712,54 +916,22 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
struct tx_desc *desc;
u32 cmd_sts;
u16 l4i_chk;
- int length;
+ int length, ret;
- cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+ cmd_sts = 0;
l4i_chk = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int hdr_len;
- int tag_bytes;
-
- BUG_ON(skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_8021Q));
-
- hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
- tag_bytes = hdr_len - ETH_HLEN;
- if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
- unlikely(tag_bytes & ~12)) {
- if (skb_checksum_help(skb) == 0)
- goto no_csum;
- dev_kfree_skb_any(skb);
- return 1;
- }
-
- if (tag_bytes & 4)
- cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
- if (tag_bytes & 8)
- cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-
- cmd_sts |= GEN_TCP_UDP_CHECKSUM |
- GEN_IP_V4_CHECKSUM |
- ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_UDP:
- cmd_sts |= UDP_FRAME;
- l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
- break;
- case IPPROTO_TCP:
- l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
- break;
- default:
- BUG();
- }
- } else {
-no_csum:
- /* Errata BTS #50, IHL must be 5 if no HW checksum */
- cmd_sts |= 5 << TX_IHL_SHIFT;
+ if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx queue full?!\n");
+ return -EBUSY;
}
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+ if (ret)
+ return ret;
+ cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -801,7 +973,7 @@ no_csum:
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- int length, queue;
+ int length, queue, ret;
struct tx_queue *txq;
struct netdev_queue *nq;
@@ -810,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
nq = netdev_get_tx_queue(dev, queue);
if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
- txq->tx_dropped++;
netdev_printk(KERN_DEBUG, dev,
"failed to linearize skb with tiny unaligned fragment\n");
return NETDEV_TX_BUSY;
}
- if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
- if (net_ratelimit())
- netdev_err(dev, "tx queue full?!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
length = skb->len;
- if (!txq_submit_skb(txq, skb)) {
- int entries_left;
-
+ if (skb_is_gso(skb))
+ ret = txq_submit_tso(txq, skb, dev);
+ else
+ ret = txq_submit_skb(txq, skb, dev);
+ if (!ret) {
txq->tx_bytes += length;
txq->tx_packets++;
- entries_left = txq->tx_ring_size - txq->tx_desc_count;
- if (entries_left < MAX_SKB_FRAGS + 1)
+ if (txq->tx_desc_count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ } else {
+ txq->tx_dropped++;
+ dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
@@ -907,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
mp->dev->stats.tx_errors++;
}
- if (cmd_sts & TX_FIRST_DESC) {
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr))
dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
- } else {
- dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
- }
-
dev_kfree_skb(skb);
}
@@ -1010,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
/* mii management interface *************************************************/
-static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+static void mv643xx_eth_adjust_link(struct net_device *dev)
{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
u32 autoneg_disable = FORCE_LINK_PASS |
DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ret = phy_ethtool_sset(mp->phy, cmd);
if (!ret)
- mv643xx_adjust_pscr(mp);
+ mv643xx_eth_adjust_link(dev);
return ret;
}
@@ -1456,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
return -EINVAL;
mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
- mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+ mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != er->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, er->tx_pending);
if (netif_running(dev)) {
mv643xx_eth_stop(dev);
@@ -1832,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_ring_size = mp->tx_ring_size;
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the number of free entries
+ * drops to the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
txq->tx_desc_count = 0;
txq->tx_curr_desc = 0;
txq->tx_used_desc = 0;
@@ -1871,6 +2046,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return -ENOMEM;
+ }
skb_queue_head_init(&txq->tx_skb);
return 0;
@@ -1891,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ if (txq->tso_hdrs)
+ dma_free_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_dma);
}
@@ -2303,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ret = phy_mii_ioctl(mp->phy, ifr, cmd);
if (!ret)
- mv643xx_adjust_pscr(mp);
+ mv643xx_eth_adjust_link(dev);
return ret;
}
@@ -2678,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
struct mv643xx_eth_platform_data *pd)
{
struct net_device *dev = mp->dev;
+ unsigned int tx_ring_size;
if (is_valid_ether_addr(pd->mac_addr))
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2692,22 +2881,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->rxq_count = pd->rx_queue_count ? : 1;
- mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+ tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
if (pd->tx_queue_size)
- mp->tx_ring_size = pd->tx_queue_size;
+ tx_ring_size = pd->tx_queue_size;
+
+ mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != tx_ring_size)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, tx_ring_size);
+
mp->tx_desc_sram_addr = pd->tx_sram_addr;
mp->tx_desc_sram_size = pd->tx_sram_size;
mp->txq_count = pd->tx_queue_count ? : 1;
}
-static void mv643xx_eth_adjust_link(struct net_device *dev)
-{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
-
- mv643xx_adjust_pscr(mp);
-}
-
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -2889,7 +3078,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (err)
goto out;
- SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+ dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
init_pscr(mp, pd->speed, pd->duplex);
@@ -2921,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->vlan_features = dev->features;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
SET_NETDEV_DEV(dev, &pdev->dev);
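
The queue accounting above replaces the per-transmit "is there room for one worst-case skb right now?" check with precomputed thresholds: the queue stops once tx_desc_count reaches tx_stop_threshold and wakes when it drains to half of that. Each TSO segment consumes a header descriptor plus at least one data descriptor, hence MV643XX_MAX_SKB_DESCS = MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS. A worked example of the arithmetic, assuming the common MAX_SKB_FRAGS value of 17 (4 KiB pages):

#include <stdio.h>

/* Constants from the patch; MAX_SKB_FRAGS assumed 17 (4 KiB pages). */
#define MAX_SKB_FRAGS		17
#define MV643XX_MAX_TSO_SEGS	100
#define MV643XX_MAX_SKB_DESCS	(MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
#define DEFAULT_TX_QUEUE_SIZE	512

int main(void)
{
	int ring = DEFAULT_TX_QUEUE_SIZE;
	int stop = ring - MV643XX_MAX_SKB_DESCS;	/* as in txq_init() */
	int wake = stop / 2;

	/* 512-entry ring: stop at 295 used descriptors, wake at 147 */
	printf("ring=%d stop=%d wake=%d\n", ring, stop, wake);
	return 0;
}

With the new default 512-entry ring this stops the queue at 295 in-flight descriptors and wakes it at 147, so a running queue always has room for one worst-case TSO skb.
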
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced263a5e..fc2fb25343f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
return -ENODEV;
}
- bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
- if (!bus) {
- dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ bus = devm_mdiobus_alloc_size(&pdev->dev,
+ sizeof(struct orion_mdio_dev));
+ if (!bus)
return -ENOMEM;
- }
bus->name = "orion_mdio_bus";
bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
- if (!bus->irq) {
- mdiobus_free(bus);
+ bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
+ if (!bus->irq)
return -ENOMEM;
- }
for (i = 0; i < PHY_MAX_ADDR; i++)
bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
out_mdio:
if (!IS_ERR(dev->clk))
clk_disable_unprepare(dev->clk);
- kfree(bus->irq);
- mdiobus_free(bus);
return ret;
}
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
- kfree(bus->irq);
- mdiobus_free(bus);
if (!IS_ERR(dev->clk))
clk_disable_unprepare(dev->clk);
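
The orion_mdio changes above convert both allocations to device-managed variants, which is why the explicit kfree()/mdiobus_free() calls disappear from the error path and from orion_mdio_remove(): devres releases them automatically when the device is unbound. A compile-only sketch of the same pattern (the foo_mdio_* names are hypothetical):

#include <linux/device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_mdio_dev {
	void __iomem *regs;
};

static int foo_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc_size(&pdev->dev,
				      sizeof(struct foo_mdio_dev));
	if (!bus)
		return -ENOMEM;

	bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
				      GFP_KERNEL);
	if (!bus->irq)
		return -ENOMEM;	/* bus itself is devm-managed, no unwind */

	return 0;
}
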
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8bf99e..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
+#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Napi polling weight */
-#define MVNETA_RX_POLL_WEIGHT 64
-
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
* supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
#define MVNETA_TX_MTU_MAX 0x3ffff
+/* TSO header size */
+#define TSO_HEADER_SIZE 128
+
/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128
/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
@@ -258,6 +264,10 @@
ETH_HLEN + ETH_FCS_LEN, \
MVNETA_CPU_D_CACHE_LINE_SIZE)
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_phys) && \
+ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
struct mvneta_pcpu_stats {
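
IS_TSO_HEADER() lets the reclaim path tell the two kinds of TX buffers apart by address alone: TSO headers live in one coherent DMA block starting at tso_hdrs_phys and must not be handed to dma_unmap_single(), while payload buffers were streaming-mapped per descriptor and must be unmapped. The predicate, restated as a standalone function (names follow the patch):

#include <stdbool.h>
#include <stdint.h>

#define TSO_HEADER_SIZE 128

/* A buffer belongs to the coherent TSO-header block iff its DMA address
 * falls in [tso_hdrs_phys, tso_hdrs_phys + size * TSO_HEADER_SIZE);
 * reclaim skips dma_unmap_single() for exactly these addresses.
 */
bool is_tso_header(uint64_t addr, uint64_t tso_hdrs_phys, int size)
{
	return addr >= tso_hdrs_phys &&
	       addr < tso_hdrs_phys + (uint64_t)size * TSO_HEADER_SIZE;
}
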
@@ -279,9 +289,6 @@ struct mvneta_port {
u32 cause_rx_tx;
struct napi_struct napi;
- /* Napi weight */
- int weight;
-
/* Core clock */
struct clk *clk;
u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
* descriptor ring
*/
int count;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
/* Array of transmitted skb */
struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
/* Index of the next TX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* DMA buffers for TSO headers */
+ char *tso_hdrs;
+
+ /* DMA address of TSO headers */
+ dma_addr_t tso_hdrs_phys;
};
struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
int next_desc_to_proc;
};
-static int rxq_number = 8;
+/* The hardware supports eight (8) rx queues, but we are only allowing
+ * the first one to be used. Therefore, let's just allocate one queue.
+ */
+static int rxq_number = 1;
static int txq_number = 8;
static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
mvneta_txq_inc_get(txq);
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
if (!skb)
continue;
-
- dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
txq->count -= tx_done;
if (netif_tx_queue_stopped(nq)) {
- if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+ if (txq->count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
}
}
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
return rx_done;
}
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+ struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = hdr_len;
+ tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command |= MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+ txq->txq_put_index * TSO_HEADER_SIZE;
+ mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+ struct sk_buff *skb, char *data, int size,
+ bool last_tcp, bool is_last)
+{
+ struct mvneta_tx_desc *tx_desc;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = size;
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ return -ENOMEM;
+ }
+
+ tx_desc->command = 0;
+ txq->tx_skb[txq->txq_put_index] = NULL;
+
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ tx_desc->command = MVNETA_TXD_L_DESC;
+
+ /* last descriptor in SKB */
+ if (is_last)
+ txq->tx_skb[txq->txq_put_index] = skb;
+ }
+ mvneta_txq_inc_put(txq);
+ return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+ struct mvneta_tx_queue *txq)
+{
+ int total_len, data_left;
+ int desc_count = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct tso_t tso;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int i;
+
+ /* Count needed descriptors */
+ if ((txq->count + tso_count_descs(skb)) >= txq->size)
+ return 0;
+
+ if (skb_headlen(skb) < hdr_len) {
+ pr_info("mvneta: linear skb data is shorter than the MAC/IP/TCP headers\n");
+ return 0;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+ mvneta_tso_put_hdr(skb, pp, txq);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+
+ if (mvneta_tso_put_data(dev, txq, skb,
+ tso.data, size,
+ size == data_left,
+ total_len == 0))
+ goto err_release;
+ data_left -= size;
+
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ return desc_count;
+
+err_release:
+ /* Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ for (i = desc_count - 1; i >= 0; i--) {
+ struct mvneta_tx_desc *tx_desc = txq->descs + i;
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ }
+ return 0;
+}
+
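mvneta_tx_tso() above is an instance of the generic software-TSO loop from net/tso.h: one header descriptor per segment built by tso_build_hdr(), then payload descriptors advanced with tso_build_data(). A condensed, driver-agnostic sketch, where queue_hdr() and queue_data() are hypothetical stand-ins for the driver's descriptor handling and the tso_* calls are the real lib/tso.c entry points used by the hunk:

#include <net/tso.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/kernel.h>

/* Hypothetical descriptor helpers; a real driver would fill and queue
 * hardware descriptors here.
 */
static void queue_hdr(char *hdr, int len) { }
static void queue_data(void *data, int len) { }

static void sw_tso_xmit(struct sk_buff *skb, char *hdr_buf)
{
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int total_len = skb->len - hdr_len;
        struct tso_t tso;

        tso_start(skb, &tso);
        while (total_len > 0) {
                int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);

                total_len -= data_left;
                /* rebuild MAC+IP+TCP for this segment into hdr_buf */
                tso_build_hdr(skb, hdr_buf, &tso, data_left, total_len == 0);
                queue_hdr(hdr_buf, hdr_len);

                while (data_left > 0) {
                        int size = min_t(int, tso.size, data_left);

                        queue_data(tso.data, size);
                        data_left -= size;
                        /* advance tso.data/tso.size to the next chunk */
                        tso_build_data(skb, &tso, size);
                }
        }
}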
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
struct mvneta_tx_queue *txq)
{
struct mvneta_tx_desc *tx_desc;
- int i;
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
goto error;
}
- if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-
txq->tx_skb[txq->txq_put_index] = skb;
-
- mvneta_txq_inc_put(txq);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
-
txq->tx_skb[txq->txq_put_index] = NULL;
- mvneta_txq_inc_put(txq);
}
+ mvneta_txq_inc_put(txq);
}
return 0;
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
- struct netdev_queue *nq;
int frags = 0;
u32 tx_cmd;
if (!netif_running(dev))
goto out;
+ if (skb_is_gso(skb)) {
+ frags = mvneta_tx_tso(skb, dev, txq);
+ goto out;
+ }
+
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_id);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
}
}
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
-
- if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
- netif_tx_stop_queue(nq);
-
out:
if (frags > 0) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+ txq->count += frags;
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+
+ if (txq->count >= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
{
int queue;
- /* free the skb's in the hal tx ring */
+ /* free the skb's in the tx ring */
for (queue = 0; queue < txq_number; queue++)
mvneta_txq_done_force(pp, &pp->txqs[queue]);
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
{
txq->size = pp->tx_ring_size;
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the number of free entries
+ * reaches the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
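Worked example for the thresholds above, with the maximum MVNETA_MAX_TXD ring of 532 descriptors and assuming the common MAX_SKB_FRAGS value of 17 (4 KiB pages):

        MVNETA_MAX_SKB_DESCS = 100 * 2 + 17 = 217
        tx_stop_threshold    = 532 - 217    = 315
        tx_wake_threshold    = 315 / 2      = 157

The queue is stopped once 315 descriptors are in use (leaving room for one worst-case TSO skb) and woken only after it drains below 157, which gives hysteresis against stop/wake ping-pong.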
/* Allocate memory for TX descriptors */
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs, txq->descs_phys);
return -ENOMEM;
}
+
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_phys, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ kfree(txq->tx_skb);
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+ return -ENOMEM;
+ }
mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
return 0;
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
{
kfree(txq->tx_skb);
+ if (txq->tso_hdrs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_phys);
if (txq->descs)
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return 0;
/* The interface is running, so we have to force a
- * reallocation of the RXQs
+ * reallocation of the queues
*/
mvneta_stop_dev(pp);
mvneta_cleanup_txqs(pp);
mvneta_cleanup_rxqs(pp);
- pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
- netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+ netdev_err(dev, "unable to setup rxqs after MTU change\n");
return ret;
}
- mvneta_setup_txqs(pp);
+ ret = mvneta_setup_txqs(pp);
+ if (ret) {
+ netdev_err(dev, "unable to setup txqs after MTU change\n");
+ return ret;
+ }
mvneta_start_dev(pp);
mvneta_port_up(pp);
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
struct mvneta_port *pp = netdev_priv(dev);
- u8 *mac = addr + 2;
- int i;
-
- if (netif_running(dev))
- return -EBUSY;
+ struct sockaddr *sockaddr = addr;
+ int ret;
+ ret = eth_prepare_mac_addr_change(dev, addr);
+ if (ret < 0)
+ return ret;
/* Remove previous address table entry */
mvneta_mac_addr_set(pp, dev->dev_addr, -1);
/* Set new addr in hw */
- mvneta_mac_addr_set(pp, mac, rxq_def);
-
- /* Set addr in the device */
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = mac[i];
+ mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+ eth_commit_mac_addr_change(dev, addr);
return 0;
}
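The rewritten mvneta_set_mac_addr() adopts the canonical prepare/commit split from <linux/etherdevice.h>: validate first (honouring IFF_LIVE_ADDR_CHANGE), program the hardware, and only then update dev->dev_addr. A minimal sketch of that shape; hw_write_mac() is a hypothetical driver hook:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical hook: program the unicast filter with the new address */
static int hw_write_mac(struct net_device *dev, const u8 *mac)
{
        return 0;
}

static int example_set_mac(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;
        int err;

        err = eth_prepare_mac_addr_change(dev, addr);   /* validates sa_data */
        if (err < 0)
                return err;

        err = hw_write_mac(dev, sa->sa_data);
        if (err)
                return err;

        eth_commit_mac_addr_change(dev, addr);  /* copies into dev->dev_addr */
        return 0;
}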
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
int ret;
- mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
-
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return -EINVAL;
pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
ring->rx_pending : MVNETA_MAX_RXD;
- pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
- ring->tx_pending : MVNETA_MAX_TXD;
+
+ pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+ MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+ if (pp->tx_ring_size != ring->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ pp->tx_ring_size, ring->tx_pending);
if (netif_running(dev)) {
mvneta_stop(dev);
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
};
/* Initialize hw */
-static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
int queue;
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
/* Set port default values */
mvneta_defaults_set(pp);
- pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
- GFP_KERNEL);
+ pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+ GFP_KERNEL);
if (!pp->txqs)
return -ENOMEM;
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
}
- pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
- GFP_KERNEL);
- if (!pp->rxqs) {
- kfree(pp->txqs);
+ pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+ GFP_KERNEL);
+ if (!pp->rxqs)
return -ENOMEM;
- }
/* Create Rx descriptor rings */
for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
return 0;
}
-static void mvneta_deinit(struct mvneta_port *pp)
-{
- kfree(pp->txqs);
- kfree(pp->rxqs);
-}
-
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
- u32 phy_addr;
struct mvneta_port *pp;
struct net_device *dev;
const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
phy_node = of_parse_phandle(dn, "phy", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "no associated PHY\n");
- err = -ENODEV;
- goto err_free_irq;
+ if (!of_phy_is_fixed_link(dn)) {
+ dev_err(&pdev->dev, "no PHY specified\n");
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+
+ err = of_phy_register_fixed_link(dn);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot register fixed PHY\n");
+ goto err_free_irq;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ phy_node = dn;
}
phy_mode = of_get_phy_mode(dn);
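The fallback added above is the standard fixed-link idiom from <linux/of_mdio.h>; a condensed sketch, with resolve_phy_node() as a hypothetical helper name:

#include <linux/of.h>
#include <linux/of_mdio.h>

/* Return the node to hand to of_phy_connect(): the "phy" phandle when
 * present, otherwise the MAC node itself after registering the fixed
 * PHY described by its "fixed-link" property.
 */
static struct device_node *resolve_phy_node(struct device_node *mac_node)
{
        struct device_node *phy = of_parse_phandle(mac_node, "phy", 0);

        if (phy)
                return phy;
        if (!of_phy_is_fixed_link(mac_node))
                return NULL;    /* no PHY description at all */
        if (of_phy_register_fixed_link(mac_node) < 0)
                return NULL;
        return mac_node;        /* fixed link: MAC node doubles as PHY node */
}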
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &mvneta_netdev_ops;
- SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
pp = netdev_priv(dev);
-
- pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- err = mvneta_init(pp, phy_addr);
- if (err < 0) {
- dev_err(&pdev->dev, "can't init eth hal\n");
+ err = mvneta_init(&pdev->dev, pp);
+ if (err < 0)
goto err_free_stats;
- }
err = mvneta_port_power_up(pp, phy_mode);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_deinit;
+ goto err_free_stats;
}
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
mvneta_conf_mbus_windows(pp, dram_target_info);
- netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+ netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
- goto err_deinit;
+ goto err_free_stats;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
return 0;
-err_deinit:
- mvneta_deinit(pp);
err_free_stats:
free_percpu(pp->stats);
err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
- mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index b358c2f6f4bd..8f5aa7c62b18 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
dev->netdev_ops = &pxa168_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+ dev->ethtool_ops = &pxa168_ethtool_ops;
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 7f81ae66cc89..e912b6887d40 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4199,6 +4199,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
},
},
+ {
+ .ident = "FUJITSU SIEMENS A8NE-FM",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b81106451a0a..69693384b58c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
SET_NETDEV_DEV(dev, &hw->pdev->dev);
dev->irq = hw->pdev->irq;
- SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+ dev->ethtool_ops = &sky2_ethtool_ops;
dev->watchdog_timeo = TX_WATCHDOG;
dev->netdev_ops = &sky2_netdev_ops[port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index c3ad464d0627..b0297da50304 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -171,7 +171,7 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
*/
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf)
+ struct mlx4_buf *buf, gfp_t gfp)
{
dma_addr_t t;
@@ -180,7 +180,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
+ size, &t, gfp);
if (!buf->direct.buf)
return -ENOMEM;
@@ -200,14 +200,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
+ gfp);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
+ &t, gfp);
if (!buf->page_list[i].buf)
goto err_free;
@@ -218,7 +218,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
if (BITS_PER_LONG == 64) {
struct page **pages;
- pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+ pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
if (!pages)
goto err_free;
for (i = 0; i < buf->nbufs; ++i)
@@ -260,11 +260,12 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
+ gfp_t gfp)
{
struct mlx4_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+ pgdir = kzalloc(sizeof *pgdir, gfp);
if (!pgdir)
return NULL;
@@ -272,7 +273,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, GFP_KERNEL);
+ &pgdir->db_dma, gfp);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -312,7 +313,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
@@ -324,7 +325,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
- pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+ pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -376,13 +377,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
{
int err;
- err = mlx4_db_alloc(dev, &wqres->db, 1);
+ err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
if (err)
return err;
*wqres->db.db = 0;
- err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+ err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
if (err)
goto err_db;
@@ -391,7 +392,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
if (err)
goto err_mtt;
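The gfp_t threaded through these allocators lets each caller choose its allocation context. A hedged illustration of the intended call-site split (the reclaim-safe GFP_NOIO motivation is an assumption about the wider series, not stated in this hunk; alloc_hw_buf() is hypothetical):

#include <linux/gfp.h>
#include <linux/mlx4/device.h>

/* Allocate a HW queue buffer either normally or from a context where
 * recursing into block I/O could deadlock (assumption: e.g. a storage
 * ULP allocating QPs during reclaim), hence GFP_NOIO.
 */
static int alloc_hw_buf(struct mlx4_dev *dev, struct mlx4_buf *buf,
                        int size, bool reclaim_safe)
{
        gfp_t gfp = reclaim_safe ? GFP_NOIO : GFP_KERNEL;

        return mlx4_buf_alloc(dev, size, PAGE_SIZE, buf, gfp);
}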
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 92d3249f63f1..5d940a26055c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* First, verify that the master reports correct status */
if (comm_pending(dev)) {
- mlx4_warn(dev, "Communication channel is not idle."
- "my toggle is %d (cmd:0x%x)\n",
+ mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd);
return -EAGAIN;
}
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
ret = mlx4_status_to_errno(vhcr->status);
} else
- mlx4_err(dev, "failed execution of VHCR_POST command"
- "opcode 0x%x\n", op);
+ mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+ op);
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
goto out;
}
+ if (out_is_imm && !out_param) {
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
+ if (out_is_imm && !out_param) {
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
+ err = -EINVAL;
+ goto out;
+ }
+
init_completion(&context->done);
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
(slave & ~0x7f) | (size & 0xff)) {
- mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
- "master_addr:0x%llx slave_id:%d size:%d\n",
- slave_addr, master_addr, slave, size);
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
return -EINVAL;
}
@@ -705,20 +715,28 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
struct ib_smp *smp = inbox->buf;
u32 index;
u8 port;
+ u8 opcode_modifier;
u16 *table;
int err;
int vidx, pidx;
+ int network_view;
struct mlx4_priv *priv = mlx4_priv(dev);
struct ib_smp *outsmp = outbox->buf;
__be16 *outtab = (__be16 *)(outsmp->data);
__be32 slave_cap_mask;
__be64 slave_node_guid;
+
port = vhcr->in_modifier;
+ /* network-view bit is for driver use only, and should not be passed to FW */
+ opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
+ network_view = !!(vhcr->op_modifier & 0x8);
+
if (smp->base_version == 1 &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->class_version == 1) {
- if (smp->method == IB_MGMT_METHOD_GET) {
+ /* host view is paravirtualized */
+ if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
index = be32_to_cpu(smp->attr_mod);
if (port < 1 || port > dev->caps.num_ports)
@@ -743,7 +761,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
/*get the slave specific caps:*/
/*do the command */
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->in_modifier, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
/* modify the response for slaves */
if (!err && slave != mlx4_master_func_num(dev)) {
@@ -760,7 +778,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
smp->attr_mod = cpu_to_be32(slave / 8);
/* execute cmd */
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->in_modifier, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
/* if needed, move slave gid to index 0 */
@@ -774,7 +792,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->in_modifier, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
@@ -784,19 +802,24 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
}
}
}
+
+ /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
+ * These are the MADs used by ib verbs (such as ib_query_gids).
+ */
if (slave != mlx4_master_func_num(dev) &&
- ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
- (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
- smp->method == IB_MGMT_METHOD_SET))) {
- mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
- "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
- slave, smp->method, smp->mgmt_class,
- be16_to_cpu(smp->attr_id));
- return -EPERM;
+ !mlx4_vf_smi_enabled(dev, slave, port)) {
+ if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+ smp->method == IB_MGMT_METHOD_GET) || network_view) {
+ mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
+ slave, smp->method, smp->mgmt_class,
+ network_view ? "Network" : "Host",
+ be16_to_cpu(smp->attr_id));
+ return -EPERM;
+ }
}
- /*default:*/
+
return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->in_modifier, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
@@ -1409,8 +1432,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) {
- mlx4_err(dev, "%s:Failed reading vhcr"
- "ret: 0x%x\n", __func__, ret);
+ mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+ __func__, ret);
kfree(vhcr);
return ret;
}
@@ -1461,9 +1484,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
- mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
- "checks for resource_id:%d\n", vhcr->op, slave,
- vhcr->in_modifier);
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+ vhcr->op, slave, vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status;
}
@@ -1502,8 +1524,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
- mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
- " error:%d, status %d\n",
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
@@ -1537,8 +1558,8 @@ out_status:
__func__);
else if (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
- mlx4_warn(dev, "Failed to generate command completion "
- "eqe for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+ slave);
}
out:
@@ -1577,8 +1598,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port);
- mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
- vp_admin->default_qos, vp_admin->link_state);
+ mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+ vp_admin->default_vlan, vp_admin->default_qos,
+ vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
@@ -1591,7 +1613,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
&admin_vlan_ix);
if (err) {
kfree(work);
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
@@ -1600,7 +1622,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
- mlx4_dbg((&(priv->dev)),
+ mlx4_dbg(&priv->dev,
"alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
@@ -1653,6 +1675,8 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
for (port = min_port; port <= max_port; port++) {
if (!test_bit(port - 1, actv_ports.ports))
continue;
+ priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
+ priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
vp_oper->state = *vp_admin;
@@ -1661,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port);
}
@@ -1677,12 +1701,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
if (0 > vp_oper->mac_idx) {
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No mac resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
vp_oper->state.mac, vp_oper->mac_idx, slave, port);
}
}
@@ -1704,6 +1728,8 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
for (port = min_port; port <= max_port; port++) {
if (!test_bit(port - 1, actv_ports.ports))
continue;
+ priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
+ MLX4_VF_SMI_DISABLED;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
@@ -1731,8 +1757,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
if (toggle != slave_state[slave].comm_toggle) {
- mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
- "STATE COMPROMISIED ***\n", toggle, slave);
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+ toggle, slave);
goto reset_slave;
}
if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1759,8 +1785,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
/*command from slave in the middle of FLR*/
if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
- mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
- "in the middle of FLR\n", slave, cmd);
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
+ slave, cmd);
return;
}
@@ -1798,8 +1824,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
- mlx4_err(dev, "Failed processing vhcr for slave:%d,"
- " resetting slave.\n", slave);
+ mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+ slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
}
@@ -1816,8 +1842,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
- mlx4_warn(dev, "Slave is going down aborting command(%d)"
- " executing from slave:%d\n",
+ mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
cmd, slave);
return;
}
@@ -1880,10 +1905,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
if (toggle != slt) {
if (master->slave_state[slave].comm_toggle
!= slt) {
- printk(KERN_INFO "slave %d out of sync."
- " read toggle %d, state toggle %d. "
- "Resynching.\n", slave, slt,
- master->slave_state[slave].comm_toggle);
+ pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ slave, slt,
+ master->slave_state[slave].comm_toggle);
master->slave_state[slave].comm_toggle =
slt;
}
@@ -1896,8 +1920,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
}
if (reported && reported != served)
- mlx4_warn(dev, "Got command event with bitmask from %d slaves"
- " but %d were served\n",
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1953,7 +1976,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
ioremap(pci_resource_start(dev->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) {
- mlx4_err(dev, "Couldn't map communication vector.\n");
+ mlx4_err(dev, "Couldn't map communication vector\n");
goto err_vhcr;
}
@@ -2080,7 +2103,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.\n");
+ mlx4_err(dev, "Couldn't map command register\n");
return -ENOMEM;
}
}
@@ -2481,11 +2504,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
ivf->mac[5] = ((s_info->mac) & 0xff);
- ivf->vlan = s_info->default_vlan;
- ivf->qos = s_info->default_qos;
- ivf->tx_rate = s_info->tx_rate;
- ivf->spoofchk = s_info->spoofchk;
- ivf->linkstate = s_info->link_state;
+ ivf->vlan = s_info->default_vlan;
+ ivf->qos = s_info->default_qos;
+ ivf->max_tx_rate = s_info->tx_rate;
+ ivf->min_tx_rate = 0;
+ ivf->spoofchk = s_info->spoofchk;
+ ivf->linkstate = s_info->link_state;
return 0;
}
@@ -2537,3 +2561,50 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
+
+int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (slave < 1 || slave >= dev->num_slaves ||
+ port < 1 || port > MLX4_MAX_PORTS)
+ return 0;
+
+ return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
+ MLX4_VF_SMI_ENABLED;
+}
+EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
+
+int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (slave == mlx4_master_func_num(dev))
+ return 1;
+
+ if (slave < 1 || slave >= dev->num_slaves ||
+ port < 1 || port > MLX4_MAX_PORTS)
+ return 0;
+
+ return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
+ MLX4_VF_SMI_ENABLED;
+}
+EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
+
+int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
+ int enabled)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (slave == mlx4_master_func_num(dev))
+ return 0;
+
+ if (slave < 1 || slave >= dev->num_slaves ||
+ port < 1 || port > MLX4_MAX_PORTS ||
+ enabled < 0 || enabled > 1)
+ return -EINVAL;
+
+ priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
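A caller-side sketch of the three helpers exported above; vf_allow_smi() is hypothetical, and the note about the setting taking effect at VF activation follows from mlx4_master_activate_admin_state() copying enable_smi into smi_enabled:

#include <linux/mlx4/device.h>
#include <linux/printk.h>

static int vf_allow_smi(struct mlx4_dev *dev, int slave, int port)
{
        int err = mlx4_vf_set_enable_smi_admin(dev, slave, port, 1);

        if (err)
                return err;

        /* The admin value is latched into the operational state only
         * when the slave is (re)activated, so this may still read 0.
         */
        if (!mlx4_vf_smi_enabled(dev, slave, port))
                pr_info("mlx4: SMI for slave %d port %d pending VF reset\n",
                        slave, port);
        return 0;
}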
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 0487121e4a0f..80f725228f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -173,11 +173,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
if (*cqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &cq_table->table, *cqn);
+ err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
if (err)
goto err_put;
return 0;
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq_affinity_change = false;
+
return 0;
err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..4b2130760eed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed Assigning an EQ to "
- "%s ,Falling back to legacy EQ's\n",
+ mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
}
@@ -164,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
+
+ err = irq_set_affinity_hint(cq->mcq.irq,
+ ring->affinity_mask);
+ if (err)
+ mlx4_warn(mdev, "Failed setting affinity hint\n");
+
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -180,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector)
+ if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (!cq->is_tx)
+ irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..fa1a069e14e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -378,8 +378,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
cmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
if (trans_type > 0 && trans_type <= 0xC) {
@@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
return priv->rx_ring_num;
}
-static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
return err;
}
-static int mlx4_en_set_rxfh_indir(struct net_device *dev,
- const u32 *ring_index)
+static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ const u8 *key)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
- en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
if (!qpn) {
- en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
- en_err(priv, "Fail to attach network rule at location %d.\n",
+ en_err(priv, "Fail to attach network rule at location %d\n",
cmd->fs.location);
goto out_free_list;
}
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int port_up;
+ int port_up = 0;
int err = 0;
if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+ if (dev->num_tc)
+ mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -1223,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
- .get_rxfh_indir = mlx4_en_get_rxfh_indir,
- .set_rxfh_indir = mlx4_en_set_rxfh_indir,
+ .get_rxfh = mlx4_en_get_rxfh,
+ .set_rxfh = mlx4_en_set_rxfh,
.get_channels = mlx4_en_get_channels,
.set_channels = mlx4_en_set_channels,
.get_ts_info = mlx4_en_get_ts_info,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
- mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
- mlx4_warn(mdev, "LSO not supported, please upgrade to later "
- "FW version to enable LSO\n");
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev);
if (err) {
- mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..7d4fb7bf2593 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
case IPPROTO_TCP:
return MLX4_NET_TRANS_RULE_ID_TCP;
default:
- return -EPROTONOSUPPORT;
+ return MLX4_NET_TRANS_RULE_NUM;
}
};
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
- if (spec_tcp_udp.id < 0) {
+ if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
filter->ip_proto);
goto ignore;
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
- memcpy(priv->prev_mac, priv->dev->dev_addr,
- sizeof(priv->prev_mac));
} else
en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
+
return err;
}
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
mutex_lock(&mdev->state_lock);
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
err = mlx4_en_do_set_mac(priv);
mutex_unlock(&mdev->state_lock);
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
+static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
+ int numa_node = priv->mdev->dev->numa_node;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
+ ring->affinity_mask);
+ if (ret)
+ free_cpumask_var(ring->affinity_mask);
+
+ return ret;
+}
+
+static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+}
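The helper pair above is the usual affinity-hint recipe: allocate a cpumask, pick a CPU near the device's NUMA node with cpumask_set_cpu_local_first(), and (as en_cq.c does on CQ activation) publish it via irq_set_affinity_hint(). A condensed sketch; rx_irq_hint_init() is a hypothetical combined helper:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

/* Suggest (not force) a NUMA-local CPU for an RX ring's interrupt.
 * The hint only steers irqbalance; clear it with
 * irq_set_affinity_hint(irq, NULL) before freeing the mask.
 */
static int rx_irq_hint_init(unsigned int irq, int ring_idx, int numa_node,
                            cpumask_var_t *mask)
{
        int err;

        if (!zalloc_cpumask_var(mask, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_set_cpu_local_first(ring_idx, numa_node, *mask);
        if (!err)
                err = irq_set_affinity_hint(irq, *mask);
        if (err)
                free_cpumask_var(*mask);
        return err;
}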
int mlx4_en_start_port(struct net_device *dev)
{
@@ -1567,17 +1588,25 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_cq_init_lock(cq);
+ err = mlx4_en_init_affinity_hint(priv, i);
+ if (err) {
+ en_err(priv, "Failed preparing IRQ affinity hint\n");
+ goto cq_err;
+ }
+
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
+ mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
for (j = 0; j < cq->size; j++)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
+ mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
@@ -1615,7 +1644,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
@@ -1715,8 +1744,10 @@ rss_err:
mac_err:
mlx4_en_put_qp(priv);
cq_err:
- while (rx_index--)
+ while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
+ mlx4_en_free_affinity_hint(priv, i);
+ }
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
msleep(1);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
+
+ mlx4_en_free_affinity_hint(priv, i);
}
}
@@ -2539,7 +2572,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+ dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
* Set driver features
@@ -2594,8 +2627,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ba049ae88749..d2d415732d99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
ring->actual_size,
GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
- en_err(priv, "Failed to allocate "
- "enough rx buffers\n");
+ en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
- en_warn(priv, "Only %d buffers allocated "
- "reducing ring size to %d",
+ en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
ring->actual_size, new_size);
goto reduce_rings;
}
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor "
- "syndrom:%d syndrom:%d\n",
- ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
- ((struct mlx4_err_cqe *) cqe)->syndrome);
+ en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
mlx4_en_cq_unlock_napi(cq);
/* If we used up all the quota - we're probably not done yet... */
- if (done == budget)
+ if (done == budget) {
INC_PERF_COUNTER(priv->pstats.napi_quota);
- else {
+ if (unlikely(cq->mcq.irq_affinity_change)) {
+ cq->mcq.irq_affinity_change = false;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return 0;
+ }
+ } else {
/* Done for now */
+ cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
- en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
- "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+ eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
" frag:%d - size:%d prefix:%d align:%d stride:%d\n",
@@ -972,7 +976,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
if (!context)
return -ENOMEM;
- err = mlx4_qp_alloc(mdev->dev, qpn, qp);
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
@@ -1012,7 +1016,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
en_err(priv, "Failed reserving drop qpn\n");
return err;
}
- err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating drop qp\n");
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@@ -1071,7 +1075,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
/* Configure RSS indirection qp */
- err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index dd1f6d346459..8be7483f8236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,12 +108,12 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf = ring->wqres.buf.direct.buf;
- en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
- "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
- ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+ en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+ ring, ring->buf, ring->size, ring->buf_size,
+ (unsigned long long) ring->wqres.buf.direct.map);
ring->qpn = qpn;
- err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_map;
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
- en_dbg(DRV, priv, "working without blueflame (%d)", err);
+ en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
ring->bf.uar = &mdev->priv_uar;
ring->bf.uar->map = mdev->uar_map;
ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done < budget) {
/* Done for now */
+ cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return done;
+ } else if (unlikely(cq->mcq.irq_affinity_change)) {
+ cq->mcq.irq_affinity_change = false;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return 0;
}
return budget;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..d954ec1eac17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,6 +53,11 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
+struct mlx4_irq_notify {
+ void *arg;
+ struct irq_affinity_notify notify;
+};
+
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
if (i != dev->caps.function &&
master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe))
- mlx4_warn(dev, "Failed to "
- " generate event "
- "for slave %d\n", i);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
- mlx4_warn(dev, "Failed to generate event "
- "for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ slave);
}
++slave_eq->cons;
}
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
- mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
- "No free EQE on slave events queue\n", slave);
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+ slave);
spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
}
break;
default:
- pr_err("%s: BUG!!! UNKNOWN state: "
- "slave:%d, port:%d\n", __func__, slave, port);
- goto out;
+ pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+ __func__, slave, port);
+ goto out;
}
ret = mlx4_get_slave_port_state(dev, slave, port);
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
for (i = 0 ; i < dev->num_slaves; i++) {
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
- mlx4_dbg(dev, "mlx4_handle_slave_flr: "
- "clean slave: %d\n", i);
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+ i);
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to notify FW on "
- "FLR done (slave:%d)\n", i);
+ mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+ i);
}
}
}
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.qp.qpn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "QP event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
+ mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
& 0xffffff,
&slave);
if (ret && ret != -ENOENT) {
- mlx4_warn(dev, "SRQ event %02x(%02x) "
- "on EQ %d at index %u: could"
- " not get slave id (%d)\n",
+ mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
- " event: %02x(%02x)\n", __func__,
- slave,
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event "
- "%02x(%02x) to slave:%d\n",
- __func__, eqe->type,
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
- mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
- " to slave: %d, port:%d\n",
+ mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "CQ event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
- eqe->type, eqe->subtype,
- eq->eqn, eq->cons_index, ret);
+ mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
break;
}
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Received comm channel event "
- "for non master device\n");
+ mlx4_warn(dev, "Received comm channel event for non master device\n");
break;
}
memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_FLR_EVENT:
flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Non-master function received"
- "FLR event\n");
+ mlx4_warn(dev, "Non-master function received FLR event\n");
break;
}
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
if (mlx4_is_master(dev))
for (i = 0; i < dev->num_slaves; i++) {
- mlx4_dbg(dev, "%s: Sending "
- "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
- " to slave: %d\n", __func__, i);
+ mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+ __func__, i);
if (i == dev->caps.function)
continue;
mlx4_slave_event(dev, i, eqe);
}
- mlx4_err(dev, "Temperature Threshold was reached! "
- "Threshold: %d celsius degrees; "
- "Current Temperature: %d\n",
- be16_to_cpu(eqe->event.warming.warning_threshold),
- be16_to_cpu(eqe->event.warming.current_temperature));
+ mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+ be16_to_cpu(eqe->event.warming.warning_threshold),
+ be16_to_cpu(eqe->event.warming.current_temperature));
} else
- mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
- "subtype %02x on EQ %d at index %u. owner=%x, "
- "nent=0x%x, slave=%x, ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
- "index %u. owner=%x, nent=0x%x, slave=%x, "
- "ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
- mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+ mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
return -ENOMEM;
}
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct mlx4_irq_notify *n = container_of(notify,
+ struct mlx4_irq_notify,
+ notify);
+ struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+ struct radix_tree_iter iter;
+ void **slot;
+
+ radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+ struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+ if (cq->irq == notify->irq)
+ cq->irq_affinity_change = true;
+ }
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+ struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+ notify.kref);
+ kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+ struct mlx4_dev *dev, int irq)
+{
+ struct mlx4_irq_notify *irq_notifier = NULL;
+ int err = 0;
+
+ irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+ if (!irq_notifier) {
+ mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+ irq);
+ return;
+ }
+
+ irq_notifier->notify.irq = irq;
+ irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+ irq_notifier->notify.release = mlx4_release_irq_notifier;
+ irq_notifier->arg = priv;
+ err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+ if (err) {
+ kfree(irq_notifier);
+ irq_notifier = NULL;
+ mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+ }
+}
+
+
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
continue;
/*we dont want to break here*/
}
+ mlx4_assign_irq_notifier(priv, dev,
+ priv->eq_table.eq[vec].irq);
+
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
}
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ irq_set_affinity_notifier(
+ priv->eq_table.eq[vec].irq,
+ NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
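
The eq.c hunks above wire up an IRQ affinity notifier per MSI-X vector: when the IRQ's CPU affinity changes, every CQ bound to that IRQ is flagged (cq->irq_affinity_change) so the completion path can react to the move, and mlx4_release_eq() unregisters the notifier before freeing the IRQ. A minimal sketch of the same kernel pattern, using hypothetical my_* names rather than the mlx4 structures:

	#include <linux/interrupt.h>
	#include <linux/slab.h>

	struct my_notifier {
		struct irq_affinity_notify notify;	/* must be embedded */
		void *owner;				/* back-pointer for the callback */
	};

	/* Called from a workqueue whenever the IRQ's affinity mask changes. */
	static void my_notify(struct irq_affinity_notify *notify,
			      const cpumask_t *mask)
	{
		struct my_notifier *n = container_of(notify, struct my_notifier,
						     notify);
		/* flag n->owner so it can re-arm on the new CPU */
	}

	/* The IRQ core drops the last kref after the notifier is replaced. */
	static void my_release(struct kref *ref)
	{
		struct my_notifier *n = container_of(ref, struct my_notifier,
						     notify.kref);
		kfree(n);
	}

	static int my_register(int irq, void *owner)
	{
		struct my_notifier *n = kzalloc(sizeof(*n), GFP_KERNEL);

		if (!n)
			return -ENOMEM;
		n->notify.irq = irq;
		n->notify.notify = my_notify;
		n->notify.release = my_release;
		n->owner = owner;
		/* unregister later with irq_set_affinity_notifier(irq, NULL) */
		return irq_set_affinity_notifier(irq, &n->notify);
	}
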
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d16a4d118903..688e1eabab29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -178,8 +178,8 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u8 field;
- u32 size;
+ u8 field, port;
+ u32 size, proxy_qp, qkey;
int err = 0;
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
@@ -209,6 +209,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
@@ -221,6 +222,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
+#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
@@ -234,28 +236,35 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
return -EINVAL;
vhcr->in_modifier = converted_port;
- /* Set nic_info bit to mark new fields support */
- field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
-
/* phys-port = logical-port */
field = vhcr->in_modifier -
find_first_bit(actv_ports.ports, dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
- field = vhcr->in_modifier;
+ port = vhcr->in_modifier;
+ proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
+
+ /* Set nic_info bit to mark new fields support */
+ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+
+ if (mlx4_vf_smi_enabled(dev, slave, port) &&
+ !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
+ field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
+ MLX4_PUT(outbox->buf, qkey,
+ QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
+ }
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
+
/* size is now the QP number */
- size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
+ size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
- size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
-
- size += 2;
- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+ MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
+ proxy_qp += 2;
+ MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
@@ -326,7 +335,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field, op_modifier;
- u32 size;
+ u32 size, qkey;
int err = 0, quotas = 0;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
@@ -414,7 +423,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
- if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
goto out;
@@ -428,8 +437,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
- mlx4_err(dev, "phy_wqe_gid is "
- "enforced on this ib port\n");
+ mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
err = -EPROTONOSUPPORT;
goto out;
}
@@ -442,6 +450,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
+ if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
+ MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
+ func_cap->qp0_qkey = qkey;
+ } else {
+ func_cap->qp0_qkey = 0;
+ }
+
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
@@ -1054,10 +1069,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
*/
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) {
- mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
- MLX4_ICM_PAGE_SIZE,
- (unsigned long long) mlx4_icm_addr(&iter),
- mlx4_icm_size(&iter));
+ mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+ MLX4_ICM_PAGE_SIZE,
+ (unsigned long long) mlx4_icm_addr(&iter),
+ mlx4_icm_size(&iter));
err = -EINVAL;
goto out;
}
@@ -1093,14 +1108,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
switch (op) {
case MLX4_CMD_MAP_FA:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM_AUX:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
- tc, ts, (unsigned long long) virt - (ts << 10));
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
+ tc, ts, (unsigned long long) virt - (ts << 10));
break;
}
@@ -1186,14 +1201,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
- mlx4_err(dev, "Installed FW has unsupported "
- "command interface revision %d.\n",
+ mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff);
- mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+ mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV;
goto out;
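
Besides the message reflow, the fw.c hunks carry two real changes: QUERY_FUNC_CAP now reports a per-VF QP0 qkey when SMI is enabled for the slave, and a flag test is fixed — the code was testing flags1 against QUERY_FUNC_CAP_FLAGS1_OFFSET (a mailbox offset, 0xc) instead of QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN (the 0x80 bit). Because both are bare integers, the compiler cannot catch this bug class; a hedged illustration with made-up constants:

	/* Offsets and bit flags are both plain ints, so mixing them up
	 * compiles cleanly. Constants here are illustrative, not mlx4's. */
	#define CAP_FLAGS1_OFFSET	0x0c	/* where flags1 lives in the mailbox */
	#define CAP_FLAGS1_FORCE_VLAN	0x80	/* a bit inside flags1 itself */

	static int vlan_forced(unsigned char flags1)
	{
		return flags1 & CAP_FLAGS1_FORCE_VLAN;	/* not ..._OFFSET */
	}
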
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 6811ee00ba7c..1fce03ebe5c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -134,6 +134,7 @@ struct mlx4_func_cap {
int max_eq;
int reserved_eq;
int mcg_quota;
+ u32 qp0_qkey;
u32 qp0_tunnel_qpn;
u32 qp0_proxy_qpn;
u32 qp1_tunnel_qpn;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 5fbf4924c272..97c9b1db1d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -245,7 +245,8 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
+ gfp_t gfp)
{
u32 i = (obj & (table->num_obj - 1)) /
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
@@ -259,7 +260,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
- (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+ (table->lowmem ? gfp : GFP_HIGHUSER) |
__GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
@@ -356,7 +357,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 i;
for (i = start; i <= end; i += inc) {
- err = mlx4_table_get(dev, table, i);
+ err = mlx4_table_get(dev, table, i, GFP_KERNEL);
if (err)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index dee67fa39107..0c7364550150 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,7 +71,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
+ gfp_t gfp);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end);
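
The icm.c/icm.h hunks thread a gfp_t parameter through mlx4_table_get() (and, further down, through the MPT and QP ICM allocators) so callers can choose the allocation context — for instance something stricter than GFP_KERNEL on a path that must not recurse into I/O — while range allocations keep plain GFP_KERNEL. A sketch of that plumbing idiom with hypothetical names:

	#include <linux/gfp.h>
	#include <linux/slab.h>

	struct obj_table {
		void *chunk[16];	/* one backing chunk per object (toy sizing) */
	};

	/* Single-object get: the caller picks the allocation context. */
	static int obj_get(struct obj_table *tbl, u32 idx, gfp_t gfp)
	{
		if (tbl->chunk[idx])
			return 0;	/* already backed */
		tbl->chunk[idx] = kzalloc(PAGE_SIZE, gfp | __GFP_NOWARN);
		return tbl->chunk[idx] ? 0 : -ENOMEM;
	}

	/* Range get keeps the historical behaviour: plain GFP_KERNEL. */
	static int obj_get_range(struct obj_table *tbl, u32 start, u32 end)
	{
		u32 i;
		int err;

		for (i = start; i <= end; i++) {
			err = obj_get(tbl, i, GFP_KERNEL);
			if (err)
				return err;
		}
		return 0;
	}
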
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c187d748115f..82ab427290c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
-#define HCA_GLOBAL_CAP_MASK 0
-
#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
-MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
- "(0/1, default 0)");
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
- mlx4_err(dev, "Only same port types supported "
- "on this HCA, aborting.\n");
+ mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EINVAL;
}
}
@@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports; i++) {
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
- mlx4_err(dev, "Requested port type for port %d is not "
- "supported on this HCA\n", i + 1);
+ mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
+ i + 1);
return -EINVAL;
}
}
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
if (dev_cap->min_page_sz > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
dev_cap->min_page_sz, PAGE_SIZE);
return -ENODEV;
}
if (dev_cap->num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n",
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
dev_cap->num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev_cap->uar_size,
(unsigned long long) pci_resource_len(dev->pdev, 2));
return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
- dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
- mlx4_warn(dev, "Requested number of MACs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_macs);
}
if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
- mlx4_warn(dev, "Requested number of VLANs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_vlans);
}
}
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
(1 << dev->caps.log_num_macs) *
(1 << dev->caps.log_num_vlans) *
- (1 << dev->caps.log_num_prios) *
dev->caps.num_ports;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
return err;
}
- /*fail if the hca has an unknown capability */
- if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
- HCA_GLOBAL_CAP_MASK) {
+ /* fail if the hca has an unknown global capability
+ * at this time global_caps should be always zeroed
+ */
+ if (hca_param.global_caps) {
mlx4_err(dev, "Unknown hca global capabilities\n");
return -ENOSYS;
}
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
err = mlx4_QUERY_FW(dev);
if (err)
- mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+ mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
page_size, PAGE_SIZE);
return -ENODEV;
}
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
- err);
+ mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+ err);
return err;
}
@@ -661,18 +650,20 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+ dev->caps.num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
+ dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
- !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
+ !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
+ !dev->caps.qp0_qkey) {
err = -ENOMEM;
goto err_mem;
}
@@ -680,10 +671,11 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; ++i) {
err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
- " port %d, aborting (%d).\n", i, err);
+ mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+ i, err);
goto err_mem;
}
+ dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
@@ -699,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long) pci_resource_len(dev->pdev, 2));
goto err_mem;
@@ -722,19 +713,23 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+ mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
return 0;
err_mem:
+ kfree(dev->caps.qp0_qkey);
kfree(dev->caps.qp0_tunnel);
kfree(dev->caps.qp0_proxy);
kfree(dev->caps.qp1_tunnel);
kfree(dev->caps.qp1_proxy);
- dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
- dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+ dev->caps.qp0_qkey = NULL;
+ dev->caps.qp0_tunnel = NULL;
+ dev->caps.qp0_proxy = NULL;
+ dev->caps.qp1_tunnel = NULL;
+ dev->caps.qp1_proxy = NULL;
return err;
}
@@ -784,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port, -1);
if (err) {
- mlx4_err(dev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
goto out;
}
}
@@ -868,9 +863,7 @@ static ssize_t set_port_type(struct device *dev,
}
}
if (err) {
- mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
- "Set only 'eth' or 'ib' for both ports "
- "(should be the same)\n");
+ mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
goto out;
}
@@ -975,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_CLOSE_PORT(mdev, port);
err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
- mlx4_err(mdev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(mdev, "Failed to set port %d, aborting\n",
+ port);
goto err_set_port;
}
}
@@ -995,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.fw_icm) {
- mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
if (err) {
- mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+ mlx4_err(dev, "MAP_FA command failed, aborting\n");
goto err_free;
}
err = mlx4_RUN_FW(dev);
if (err) {
- mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+ mlx4_err(dev, "RUN_FW command failed, aborting\n");
goto err_unmap_fa;
}
@@ -1091,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
if (err) {
- mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+ mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
return err;
}
- mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+ mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
(unsigned long long) icm_size >> 10,
(unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.aux_icm) {
- mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
if (err) {
- mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+ mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
goto err_free_aux;
}
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
if (err) {
- mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
goto err_unmap_aux;
}
@@ -1125,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
goto err_unmap_cmpt;
}
@@ -1146,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
- mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
goto err_unmap_eq;
}
@@ -1156,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mpts,
dev->caps.reserved_mrws, 1, 1);
if (err) {
- mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
goto err_unmap_mtt;
}
@@ -1167,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map QP context memory, aborting\n");
goto err_unmap_dmpt;
}
@@ -1178,7 +1171,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
goto err_unmap_qp;
}
@@ -1189,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
goto err_unmap_auxc;
}
@@ -1210,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_cqs,
dev->caps.reserved_cqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
goto err_unmap_rdmarc;
}
@@ -1220,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_srqs,
dev->caps.reserved_srqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
goto err_unmap_cq;
}
@@ -1238,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
goto err_unmap_srq;
}
@@ -1315,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
- mlx4_warn(dev, "Failed to close slave function.\n");
+ mlx4_warn(dev, "Failed to close slave function\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
@@ -1413,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) {
- mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ mlx4_warn(dev, "PF is not ready - Deferring probe\n");
return -EPROBE_DEFER;
}
@@ -1426,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
* NUM_OF_RESET_RETRIES times before leaving.*/
if (ret_from_reset) {
if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
- mlx4_warn(dev, "slave is currently in the "
- "middle of FLR. Deferring probe.\n");
+ mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EPROBE_DEFER;
} else
@@ -1441,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
MLX4_COMM_GET_IF_REV(slave_read)) {
- mlx4_err(dev, "slave driver version is not supported"
- " by the master\n");
+ mlx4_err(dev, "slave driver version is not supported by the master\n");
goto err;
}
@@ -1520,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
}
dev->oper_log_mgm_entry_size =
mlx4_log_num_mgm_entry_size > 0 ?
@@ -1529,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
}
- mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
- "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
mlx4_steering_mode_str(dev->caps.steering_mode),
dev->oper_log_mgm_entry_size,
mlx4_log_num_mgm_entry_size);
@@ -1564,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_FW(dev);
if (err) {
if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
+ mlx4_info(dev, "non-primary physical function, skipping\n");
else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_FW command failed, aborting\n");
return err;
}
err = mlx4_load_fw(dev);
if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
+ mlx4_err(dev, "Failed to start FW, aborting\n");
return err;
}
@@ -1584,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
goto err_stop_fw;
}
@@ -1625,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
/*
@@ -1636,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
memset(&init_hca, 0, sizeof(init_hca));
err = mlx4_QUERY_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
@@ -1649,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
if (!dev->caps.hca_core_clock) {
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_err(dev,
- "HCA frequency is 0. Timestamping is not supported.");
+ "HCA frequency is 0 - timestamping is not supported\n");
} else if (map_internal_clock(dev)) {
/*
* Map internal clock,
* in case of failure disable timestamping
*/
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+ mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
}
}
} else {
@@ -1683,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
- mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
goto unmap_bf;
}
@@ -1696,6 +1685,14 @@ unmap_bf:
unmap_internal_clock(dev);
unmap_bf_area(dev);
+ if (mlx4_is_slave(dev)) {
+ kfree(dev->caps.qp0_qkey);
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+ }
+
err_close:
if (mlx4_is_slave(dev))
mlx4_slave_exit(dev);
@@ -1793,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "user access region table, aborting.\n");
- return err;
+ mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
if (err) {
- mlx4_err(dev, "Failed to allocate driver access region, "
- "aborting.\n");
+ mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
goto err_uar_table_free;
}
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
- mlx4_err(dev, "Couldn't map kernel access region, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
err = -ENOMEM;
goto err_uar_free;
}
err = mlx4_init_pd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "protection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
goto err_kar_unmap;
}
err = mlx4_init_xrcd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "reliable connection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
goto err_pd_table_free;
}
err = mlx4_init_mr_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "memory region table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
goto err_xrcd_table_free;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_mcg_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free;
}
}
err = mlx4_init_eq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "event queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
if (err) {
- mlx4_err(dev, "Failed to switch to event-driven "
- "firmware commands, aborting.\n");
+ mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
goto err_eq_table_free;
}
err = mlx4_NOP(dev);
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
- mlx4_warn(dev, "NOP command failed to generate MSI-X "
- "interrupt IRQ %d).\n",
+ mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
- mlx4_warn(dev, "Trying again without MSI-X.\n");
+ mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
- mlx4_err(dev, "NOP command failed to generate interrupt "
- "(IRQ %d), aborting.\n",
+ mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -1877,28 +1864,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_cq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "completion queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
goto err_cmd_poll;
}
err = mlx4_init_srq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "shared receive queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
goto err_cq_table_free;
}
err = mlx4_init_qp_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "queue pair table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
goto err_srq_table_free;
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
}
@@ -1908,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_get_port_ib_caps(dev, port,
&ib_port_default_caps);
if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing "
- "with caps = 0\n", port, err);
+ mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+ port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */
@@ -1920,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (i == mlx4_master_func_num(dev))
continue;
priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
- ib_port_default_caps;
+ ib_port_default_caps;
}
}
@@ -1933,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
+ port);
goto err_counters_table_free;
}
}
@@ -2009,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
kfree(entries);
goto no_msi;
} else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/*Working in legacy mode , all EQ's shared*/
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
@@ -2210,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
@@ -2258,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing DCS, aborting."
- "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+ dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
pci_dev_data, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing UAR, aborting\n");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -2280,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
- "consistent PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
@@ -2325,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (total_vfs) {
unsigned vfs_offset = 0;
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
- vfs_offset + nvfs[i] < extended_func_num(pdev);
+ vfs_offset + nvfs[i] < extended_func_num(pdev);
vfs_offset += nvfs[i], i++)
;
if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2351,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (err < 0)
goto err_free_dev;
else {
- mlx4_warn(dev, "Multiple PFs not yet supported."
- " Skipping PF.\n");
+ mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
err = -EINVAL;
goto err_free_dev;
}
@@ -2362,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
total_vfs);
dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = 0;
@@ -2371,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, total_vfs);
if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
atomic_dec(&pf_loading);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
+ MLX4_FLAG_MASTER;
dev->num_vfs = total_vfs;
sriov_initialized = 1;
}
@@ -2395,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
err = mlx4_reset(dev);
if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_rel_own;
}
}
@@ -2403,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
slave_start:
err = mlx4_cmd_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init command interface, aborting.\n");
+ mlx4_err(dev, "Failed to init command interface, aborting\n");
goto err_sriov;
}
@@ -2417,8 +2395,7 @@ slave_start:
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init slave mfunc"
- " interface, aborting.\n");
+ mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
goto err_cmd;
}
}
@@ -2450,8 +2427,7 @@ slave_start:
unsigned sum = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init master mfunc"
- "interface, aborting.\n");
+ mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
goto err_close;
}
if (sriov_initialized) {
@@ -2462,11 +2438,9 @@ slave_start:
if (ib_ports &&
(num_vfs_argc > 1 || probe_vfs_argc > 1)) {
mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs "
- "with IB port. Single port VFs syntax"
- " is only supported when all ports "
- "are configured as ethernet\n");
- goto err_close;
+ "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
+ err = -EINVAL;
+ goto err_master_mfunc;
}
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
unsigned j;
@@ -2491,8 +2465,7 @@ slave_start:
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
err = -ENOSYS;
- mlx4_err(dev, "INTx is not supported in multi-function mode."
- " aborting.\n");
+ mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
@@ -2566,6 +2539,14 @@ err_master_mfunc:
if (mlx4_is_master(dev))
mlx4_multi_func_cleanup(dev);
+ if (mlx4_is_slave(dev)) {
+ kfree(dev->caps.qp0_qkey);
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+ }
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
@@ -2637,7 +2618,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
/* in SRIOV it is not allowed to unload the pf's
* driver while there are alive vf's */
if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
- printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ pr_warn("Removing PF when there are assigned VF's !!!\n");
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -2689,6 +2670,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ kfree(dev->caps.qp0_qkey);
kfree(dev->caps.qp0_tunnel);
kfree(dev->caps.qp0_proxy);
kfree(dev->caps.qp1_tunnel);
@@ -2800,7 +2782,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
- .shutdown = mlx4_remove_one,
+ .shutdown = __mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
@@ -2808,33 +2790,36 @@ static struct pci_driver mlx4_driver = {
static int __init mlx4_verify_params(void)
{
if ((log_num_mac < 0) || (log_num_mac > 7)) {
- pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
+ pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
return -1;
}
if (log_num_vlan != 0)
- pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
- MLX4_LOG_NUM_VLANS);
+ pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
+ MLX4_LOG_NUM_VLANS);
+
+ if (use_prio != 0)
+ pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
- pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+ pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
+ log_mtts_per_seg);
return -1;
}
/* Check if module param for ports type has legal combination */
if (port_type_array[0] == false && port_type_array[1] == true) {
- printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
port_type_array[0] = true;
}
if (mlx4_log_num_mgm_entry_size != -1 &&
(mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
- pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
- "in legal range (-1 or %d..%d)\n",
- mlx4_log_num_mgm_entry_size,
- MLX4_MIN_MGM_LOG_ENTRY_SIZE,
- MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
return -1;
}
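
main.c also retires deprecated logging aliases (pr_warning() -> pr_warn(), raw printk() -> pr_warn()) and drops the unused use_prio machinery, keeping the module parameter only to warn that it is now ignored. The __init-time parameter-validation pattern used here, sketched with invented parameter names:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int log_widgets = 4;
	module_param(log_widgets, int, 0444);
	MODULE_PARM_DESC(log_widgets, "Log2 number of widgets (1..7)");

	static bool use_legacy;
	module_param(use_legacy, bool, 0444);
	MODULE_PARM_DESC(use_legacy, "Obsolete, ignored");

	static int __init demo_init(void)
	{
		if (log_widgets < 1 || log_widgets > 7) {
			pr_warn("demo: bad log_widgets: %d\n", log_widgets);
			return -EINVAL;		/* refuse to load */
		}
		if (use_legacy)
			pr_warn("demo: use_legacy - obsolete module param, ignored\n");
		return 0;
	}
	module_init(demo_init);

	MODULE_LICENSE("GPL");
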
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..4c36def8e10f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != hash) {
- mlx4_err(dev, "Found zero MGID in AMGM.\n");
+ mlx4_err(dev, "Found zero MGID in AMGM\n");
err = -EINVAL;
}
return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
mlx4_err(dev, "%s", buf);
if (len >= BUF_SIZE)
- mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(dev, mailbox);
- return -EINVAL;
+ return ret;
}
size += ret;
}
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
if (ret == -ENOMEM)
mlx4_err_rule(dev,
- "mcg table is full. Fail to register network rule.\n",
+ "mcg table is full. Fail to register network rule\n",
rule);
else if (ret)
- mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+ mlx4_err_rule(dev, "Fail to register network rule\n", rule);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == dev->caps.num_qp_per_mgm) {
- mlx4_err(dev, "MGM at index %x is full.\n", index);
+ mlx4_err(dev, "MGM at index %x is full\n", index);
err = -ENOMEM;
goto out;
}
@@ -1042,7 +1042,7 @@ out:
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "Got AMGM index %d < %d",
+ mlx4_warn(dev, "Got AMGM index %d < %d\n",
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (amgm_index) {
if (amgm_index < dev->caps.num_mgms)
- mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+ mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+ mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
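
Beyond the added trailing "\n"s (without which the next printk can be glued onto the same console line), the mcg.c hunks fix mlx4_flow_attach() to propagate the parser's errno instead of flattening every failure to -EINVAL, keeping -ENOMEM and friends distinguishable for callers. A small sketch of the idiom, with hypothetical rule/parse_one types standing in for the mlx4 ones:

	#include <linux/errno.h>

	struct rule { struct rule *next; int kind; };

	static int parse_one(const struct rule *r)
	{
		return r->kind >= 0 ? 0 : -EINVAL;	/* toy validator */
	}

	static int attach_all(const struct rule *head)
	{
		const struct rule *cur;
		int ret;

		for (cur = head; cur; cur = cur->next) {
			ret = parse_one(cur);
			if (ret < 0)
				return ret;	/* do not rewrite to -EINVAL */
		}
		return 0;
	}
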
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 8e9eb02e09cb..1d8af7336807 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -133,6 +133,11 @@ enum {
MLX4_COMM_CMD_FLR = 254
};
+enum {
+ MLX4_VF_SMI_DISABLED,
+ MLX4_VF_SMI_ENABLED
+};
+
/*The flag indicates that the slave should delay the RESET cmd*/
#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
/*indicates how many retries will be done if we are in the middle of FLR*/
@@ -216,18 +221,19 @@ extern int mlx4_debug_level;
#define mlx4_debug_level (0)
#endif /* CONFIG_MLX4_DEBUG */
-#define mlx4_dbg(mdev, format, arg...) \
+#define mlx4_dbg(mdev, format, ...) \
do { \
if (mlx4_debug_level) \
- dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+ dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
+ ##__VA_ARGS__); \
} while (0)
-#define mlx4_err(mdev, format, arg...) \
- dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
- dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...) \
+ dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
@@ -488,6 +494,7 @@ struct mlx4_vport_state {
struct mlx4_vf_admin_state {
struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
+ u8 enable_smi[MLX4_MAX_PORTS + 1];
};
struct mlx4_vport_oper_state {
@@ -495,8 +502,10 @@ struct mlx4_vport_oper_state {
int mac_idx;
int vlan_idx;
};
+
struct mlx4_vf_oper_state {
struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
+ u8 smi_enabled[MLX4_MAX_PORTS + 1];
};
struct slave_list {
@@ -895,7 +904,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
@@ -903,7 +912,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
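
The mlx4.h macro hunk converts the logging wrappers from the old GNU named-variadic form (format, arg...) to the C99 form (format, ...) with ##__VA_ARGS__, and parenthesizes (mdev) so non-trivial expressions expand safely. The ## before __VA_ARGS__ is the GCC/Clang extension that swallows the trailing comma when no arguments follow the format string; a compressed sketch of a wrapper in the same shape:

	#include <linux/device.h>

	/* Hypothetical wrapper mirroring the mlx4_err() shape. */
	#define my_err(mdev, format, ...) \
		dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)

	/* Both forms expand cleanly:
	 *   my_err(dev, "plain message\n");	- trailing comma elided
	 *   my_err(dev, "failed: %d\n", err);	- arguments forwarded
	 */
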
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..0e15295bedd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
int hwtstamp_rx_filter;
+ cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
@@ -830,26 +831,26 @@ __printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...);
-#define en_dbg(mlevel, priv, format, arg...) \
-do { \
- if (NETIF_MSG_##mlevel & priv->msg_enable) \
- en_print(KERN_DEBUG, priv, format, ##arg); \
+#define en_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0)
-#define en_warn(priv, format, arg...) \
- en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...) \
- en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...) \
- en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...) \
- pr_err("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...) \
- pr_info("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- pr_warning("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...) \
+ en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...) \
+ en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...) \
+ en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...) \
+ pr_err(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ pr_info(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ pr_warn(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#endif
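
mlx4_en.h grows a cpumask_var_t affinity_mask in the RX ring, presumably pairing with the affinity-notifier work above so a ring can be pinned to a CPU. cpumask_var_t may be heap-allocated (CONFIG_CPUMASK_OFFSTACK), so it has a strict alloc/free lifecycle; a hedged sketch with hypothetical helper names:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int ring_init_affinity(cpumask_var_t *mask, int cpu)
	{
		if (!zalloc_cpumask_var(mask, GFP_KERNEL))
			return -ENOMEM;		/* can only fail when offstack */
		cpumask_set_cpu(cpu, *mask);
		return 0;
	}

	static void ring_free_affinity(cpumask_var_t *mask)
	{
		free_cpumask_var(*mask);	/* no-op when onstack */
	}
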
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 24835853b753..2839abb878a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to free mtt range at:"
- "%d order:%d\n", offset, order);
+ mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+ offset, order);
return;
}
__mlx4_free_mtt_range(dev, offset, order);
@@ -364,14 +364,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
__mlx4_mpt_release(dev, index);
}
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
- return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}
-static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
u64 param = 0;
@@ -382,7 +382,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_mpt_alloc_icm(dev, index);
+ return __mlx4_mpt_alloc_icm(dev, index, gfp);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err) {
- mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
- mlx4_warn(dev, "MR has MWs bound to it.\n");
+ mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+ err);
return err;
}
@@ -469,7 +469,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
if (err)
return err;
@@ -627,13 +627,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf)
+ struct mlx4_buf *buf, gfp_t gfp)
{
u64 *page_list;
int err;
int i;
- page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
+ page_list = kmalloc(buf->npages * sizeof *page_list,
+ gfp);
if (!page_list)
return -ENOMEM;
@@ -680,7 +681,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
if (err)
return err;
@@ -773,7 +774,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
- mlx4_warn(dev, "MTT table of order %u is too small.\n",
+ mlx4_warn(dev, "MTT table of order %u is too small\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
@@ -954,8 +955,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
- printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
- " failed (%d)\n", err);
+ pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
return;
}
@@ -964,8 +964,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
(dev->caps.num_mpts - 1));
mlx4_free_cmd_mailbox(dev, mailbox);
if (err) {
- printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
- err);
+ pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
return;
}
fmr->mr.enabled = MLX4_MPT_EN_SW;
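
The mr.c changes thread a `gfp_t` parameter from `mlx4_mr_enable()`/`mlx4_mw_enable()` down through `mlx4_mpt_alloc_icm()` to `mlx4_table_get()` and `kmalloc()`. The point of the pattern: a mid-layer helper cannot guess the caller's allocation context, so callers that must not recurse into the I/O path (this series is plumbing for GFP_NOIO users) pass their own flags while ordinary paths keep GFP_KERNEL. A hedged sketch of the shape, with illustrative names (`cache_get_entry` is not a driver API):

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct entry { int refcount; void *buf; };

    /*
     * Allocation context is the caller's decision, not this helper's:
     * the same code serves GFP_KERNEL paths and GFP_NOIO/GFP_ATOMIC
     * paths alike.
     */
    static int cache_get_entry(struct entry *e, size_t size, gfp_t gfp)
    {
            if (!e->buf) {
                    e->buf = kmalloc(size, gfp);
                    if (!e->buf)
                            return -ENOMEM;
            }
            e->refcount++;
            return 0;
    }
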
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 5ec6f203c6e6..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -254,8 +254,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (validate_index(dev, table, index))
goto out;
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify mac table\n", index);
+ mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+ index);
goto out;
}
@@ -453,9 +453,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
}
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have %d more references for index %d,"
- "no need to modify vlan table\n", table->refs[index],
- index);
+ mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+ table->refs[index], index);
goto out;
}
table->entries[index] = 0;
@@ -796,8 +795,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
sizeof(gid_entry_tbl->raw))) {
/* found duplicate */
- mlx4_warn(dev, "requested gid entry for slave:%d "
- "is a duplicate of gid at index %d\n",
+ mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
slave, i);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return -EINVAL;
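
Most of the message changes here and in profile.c, reset.c and resource_tracker.c below do one thing: they rejoin format strings that had been split across source lines, even where the result exceeds 80 columns (checkpatch permits long lines for user-visible strings). A single literal keeps the message greppable, and it avoids the splice bug where the join loses a space, visible in the first hunk above, where the old code printed "index 0,no need to modify mac table":

    /* Before: the literals concatenate with no space after the comma,
     * and grepping for the logged message finds no source match. */
    mlx4_dbg(dev, "Have more references for index %d,"
             "no need to modify mac table\n", index);

    /* After: one greppable literal; only the argument list wraps. */
    mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
             index);
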
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
}
if (total_size > dev_cap->max_icm_sz) {
- mlx4_err(dev, "Profile requires 0x%llx bytes; "
- "won't fit in 0x%llx bytes of context memory.\n",
- (unsigned long long) total_size,
- (unsigned long long) dev_cap->max_icm_sz);
+ mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+ (unsigned long long) total_size,
+ (unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
- mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
- "size 0x%10llx\n",
- i, res_name[profile[i].type], profile[i].log_num,
+ mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+ i, res_name[profile[i].type],
+ profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index fbd32af89c7c..0dc31d85fc3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,37 +264,37 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) {
- mlx4_warn(dev, "Failed to release qp range"
- " base:%d cnt:%d\n", base_qpn, cnt);
+ mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+ base_qpn, cnt);
}
} else
__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
if (err)
goto err_put_rdmarc;
@@ -316,7 +316,7 @@ err_out:
return err;
}
-static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
u64 param = 0;
@@ -326,7 +326,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_qp_alloc_icm(dev, qpn);
+ return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
@@ -355,7 +355,7 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
__mlx4_qp_free_icm(dev, qpn);
}
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -366,7 +366,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
qp->qpn = qpn;
- err = mlx4_qp_alloc_icm(dev, qpn);
+ err = mlx4_qp_alloc_icm(dev, qpn, gfp);
if (err)
return err;
@@ -612,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
- mlx4_err(dev, "Failed to bring QP to state: "
- "%d with error: %d\n",
+ mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
states[i + 1], err);
return err;
}
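
`__mlx4_qp_alloc_icm()` now passes the caller's gfp through all five `mlx4_table_get()` calls while keeping its label-based unwind: each failure releases, in reverse order, exactly the tables already acquired. The idiom in isolation, as a compilable userspace sketch (`acquire_both` and the malloc'd "resources" are illustrative):

    #include <stdlib.h>
    #include <errno.h>

    struct ctx { void *a; void *b; };

    /*
     * On failure, fall through the labels in reverse acquisition
     * order, the same shape as the err_put_rdmarc/.../err_put_qp
     * chain in __mlx4_qp_alloc_icm() above.
     */
    int acquire_both(struct ctx *c, size_t sz)
    {
            c->a = malloc(sz);
            if (!c->a)
                    goto err_out;

            c->b = malloc(sz);
            if (!c->b)
                    goto err_free_a;

            return 0;

    err_free_a:
            free(c->a);
            c->a = NULL;
    err_out:
            return -ENOMEM;
    }
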
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
goto out;
}
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
continue;
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
goto out;
}
}
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+ mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
goto out;
}
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (vendor == 0xffff) {
err = -ENODEV;
- mlx4_err(dev, "PCI device did not come back after reset, "
- "aborting.\n");
+ mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
goto out;
}
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Device Control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Link control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
goto out;
}
}
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA reg %x, "
- "aborting.\n", i);
+ mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+ i);
goto out;
}
}
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA COMMAND, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index f16e539749c4..0efc1368e5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -279,7 +279,7 @@ enum qp_transition {
};
/* For Debug uses */
-static const char *ResourceType(enum mlx4_resource rt)
+static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
@@ -307,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int err = -EINVAL;
int allocated, free, reserved, guaranteed, from_free;
+ int from_rsvd;
if (slave > dev->num_vfs)
return -EINVAL;
@@ -321,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];
- if (allocated + count > res_alloc->quota[slave])
+ if (allocated + count > res_alloc->quota[slave]) {
+ mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
+ slave, port, resource_str(res_type), count,
+ allocated, res_alloc->quota[slave]);
goto out;
+ }
if (allocated + count <= guaranteed) {
err = 0;
+ from_rsvd = count;
} else {
/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
@@ -333,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
else
from_free = count;
- if (free - from_free > reserved)
+ from_rsvd = count - from_free;
+
+ if (free - from_free >= reserved)
err = 0;
+ else
+ mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
+ slave, port, resource_str(res_type), free,
+ from_free, reserved);
}
if (!err) {
@@ -342,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
+ res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
} else {
res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
+ res_alloc->res_reserved -= from_rsvd;
}
}
@@ -360,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int allocated, guaranteed, from_rsvd;
if (slave > dev->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
+
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated - count >= guaranteed) {
+ from_rsvd = 0;
+ } else {
+ /* portion may need to be returned to reserved area */
+ if (allocated - guaranteed > 0)
+ from_rsvd = count - (allocated - guaranteed);
+ else
+ from_rsvd = count;
+ }
+
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
+ res_alloc->res_port_rsvd[port - 1] += from_rsvd;
} else {
res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
+ res_alloc->res_reserved += from_rsvd;
}
spin_unlock(&res_alloc->alloc_lock);
@@ -963,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
ret = alloc_srq_tr(id);
break;
case RES_MAC:
- printk(KERN_ERR "implementation missing\n");
+ pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
ret = alloc_counter_tr(id);
@@ -1057,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
{
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
- printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
- __func__, __LINE__,
- mtt_states_str(res->com.state),
- atomic_read(&res->ref_count));
+ pr_devel("%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_MTT_ALLOCATED)
return -EPERM;
@@ -1533,7 +1566,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
if (!fw_reserved(dev, qpn)) {
- err = __mlx4_qp_alloc_icm(dev, qpn);
+ err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
return err;
@@ -1620,7 +1653,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- err = __mlx4_mpt_alloc_icm(dev, mpt->key);
+ err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
@@ -2828,10 +2861,12 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
}
static int verify_qp_parameters(struct mlx4_dev *dev,
+ struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
enum qp_transition transition, u8 slave)
{
u32 qp_type;
+ u32 qpn;
struct mlx4_qp_context *qp_ctx;
enum mlx4_qp_optpar optpar;
int port;
@@ -2874,8 +2909,22 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
default:
break;
}
+ break;
+ case MLX4_QP_ST_MLX:
+ qpn = vhcr->in_modifier & 0x7fffff;
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (transition == QP_TRANS_INIT2RTR &&
+ slave != mlx4_master_func_num(dev) &&
+ mlx4_is_qp_reserved(dev, qpn) &&
+ !mlx4_vf_smi_enabled(dev, slave, port)) {
+ /* only enabled VFs may create MLX proxy QPs */
+ mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
+ __func__, slave, port);
+ return -EPERM;
+ }
break;
+
default:
break;
}
@@ -3455,7 +3504,7 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
return err;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
return err;
@@ -3509,7 +3558,7 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
if (err)
return err;
@@ -3531,7 +3580,7 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
if (err)
return err;
@@ -3568,7 +3617,7 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
if (err)
return err;
@@ -3590,7 +3639,7 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
- err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
+ err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
if (err)
return err;
@@ -3881,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
}
if (!be_mac) {
- pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+ pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
@@ -3978,7 +4027,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
- pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -3996,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
- pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+ pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put;
@@ -4005,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
- pr_err("Corrupted mailbox.\n");
+ pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put;
}
@@ -4019,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
- mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ mlx4_err(dev, "Fail to add flow steering resources\n");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4057,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
- mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
@@ -4135,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (print)
mlx4_dbg(dev,
"%s id 0x%llx is busy\n",
- ResourceType(type),
+ resource_str(type),
r->res_id);
++busy;
} else {
@@ -4186,8 +4235,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_QP);
if (err)
- mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
- "for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4225,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_qps: failed"
- " to move slave %d qpn %d to"
- " reset\n", slave,
- qp->local_qpn);
+ mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+ slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
@@ -4262,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
- mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4293,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_srqs: failed"
- " to move slave %d srq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
@@ -4330,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_CQ);
if (err)
- mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4361,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_cqs: failed"
- " to move slave %d cq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
@@ -4395,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MPT);
if (err)
- mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4431,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_mrs: failed"
- " to move slave %d mpt %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
@@ -4465,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MTT);
if (err)
- mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4568,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_EQ);
if (err)
- mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4601,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_eqs: failed"
- " to move slave %d eqs %d to"
- " SW ownership\n", slave, eqn);
+ mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+ slave, eqn);
mlx4_free_cmd_mailbox(dev, mailbox);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
@@ -4632,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
- mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4663,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
- mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4809,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
- mlx4_info(dev, "UPDATE_QP failed for slave %d, "
- "port %d, qpn %d (%d)\n",
- work->slave, port, qp->local_qpn,
- err);
+ mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+ work->slave, port, qp->local_qpn, err);
errors++;
}
}
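
The grant/release hunks in resource_tracker.c make the reserved-pool accounting symmetric. On grant, any portion that dips into a slave's guaranteed allotment is charged to the reserved counters (`from_rsvd`); on release, the same split is recomputed so the reserved pool is repaid exactly what it lent. Note also the related boundary fix where `free - from_free > reserved` became `>=`, the rename of ResourceType() to resource_str(), and the two new mlx4_warn() diagnostics so quota and free-pool failures are no longer silent. The arithmetic, modelled as a standalone program (names are illustrative):

    #include <stdio.h>

    /* Portion of a grant of @count charged to the reserved pool,
     * given the slave's current @allocated total. */
    static int grant_from_rsvd(int allocated, int guaranteed, int count)
    {
            if (allocated + count <= guaranteed)
                    return count;
            if (guaranteed > allocated)
                    return guaranteed - allocated;
            return 0;
    }

    /* Mirror image on release: portion repaid to the reserved pool. */
    static int release_to_rsvd(int allocated, int guaranteed, int count)
    {
            if (allocated - count >= guaranteed)
                    return 0;
            if (allocated > guaranteed)
                    return count - (allocated - guaranteed);
            return count;
    }

    int main(void)
    {
            /* guaranteed 4, allocated 2: granting 6 borrows 2 from the
             * reserved pool; releasing those 6 (allocated now 8)
             * repays the same 2. */
            printf("%d %d\n",
                   grant_from_rsvd(2, 4, 6), release_to_rsvd(8, 4, 6));
            return 0;
    }
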
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 98faf870b0b0..67146624eb58 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -103,11 +103,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, *srqn);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
if (err)
goto err_put;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
}
- mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
- deliv_status_to_str(ent->status), ent->status);
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
*/
rmb();
- mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+ mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+ eq->eqn, eqe_type_str(eqe->type));
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
- mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
}
break;
default:
- mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+ mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+ eqe->type, eq->eqn);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
return err;
}
}
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev,
- "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+ "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
- "Can't set consistent PCI DMA mask, aborting.\n");
+ "Can't set consistent PCI DMA mask, aborting\n");
return err;
}
}
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
return -ENODEV;
}
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
}
err = request_bar(pdev);
if (err) {
- dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+ dev_err(&pdev->dev, "error requesting BARs, aborting\n");
goto err_disable;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...) \
+ pr_debug("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, arg...) \
-do { \
- if ((mask) & mlx5_core_debug_mask) \
- pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
- __func__, __LINE__, current->pid, ##arg); \
+#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...) \
+ pr_err("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...) \
+ pr_warn("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
enum {
MLX5_CMD_DATA, /* print command payload only */
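
mlx5_core.h gets the same `##__VA_ARGS__` conversion as mlx4_en.h above, and `mlx5_core_dbg_mask()` now delegates to `mlx5_core_dbg()` instead of duplicating the prefix format. The `do { ... } while (0)` wrapper is kept even though the body shrank to one conditional: it is what makes a statement-like macro parse as a single statement. A minimal illustration (hypothetical `report` macro):

    #include <stdio.h>

    /*
     * Without the do/while(0), the expansion plus the source-level
     * semicolon would not behave as one statement: the else below
     * could bind to the macro's internal if, or be orphaned outright.
     */
    #define report(cond, msg)                       \
    do {                                            \
            if (cond)                               \
                    printf("report: %s\n", msg);    \
    } while (0)

    int main(void)
    {
            int degraded = 1;

            if (degraded)
                    report(1, "degraded");  /* expands to one statement */
            else
                    printf("healthy\n");
            return 0;
    }
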
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 4cc927649404..ba0401d4af50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
}
if (err) {
- mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+ mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err;
}
@@ -82,7 +82,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
return mlx5_cmd_status_to_err(&lout.hdr);
}
+ mr->iova = be64_to_cpu(in->seg.start_addr);
+ mr->size = be64_to_cpu(in->seg.len);
mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+ mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
be32_to_cpu(lout.mkey), key, mr->key);
@@ -191,7 +195,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
}
if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "create_psv bad status %d\n",
+ out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
@@ -220,7 +225,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
}
if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "destroy_psv bad status %d\n",
+ out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+ func_id, npages, err);
goto out_alloc;
}
dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
goto out_alloc;
}
}
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed recliaming pages\n");
+ mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
err = give_pages(dev, req->func_id, req->npages, 1);
if (err)
- mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
- "reclaim" : "give", err);
+ mlx5_core_warn(dev, "%s fail %d\n",
+ req->npages < 0 ? "reclaim" : "give", err);
kfree(req);
}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
optimal_reclaimed_pages(),
&nclaimed);
if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+ mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+ err);
return err;
}
if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "ret %d", err);
+ mlx5_core_warn(dev, "ret %d\n", err);
return err;
}
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = radix_tree_insert(&table->tree, qp->qpn, qp);
spin_unlock_irq(&table->lock);
if (err) {
- mlx5_core_warn(dev, "err %d", err);
+ mlx5_core_warn(dev, "err %d\n", err);
goto err_cmd;
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 16435b3cfa9f..6c7c78baedca 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
if (ksp->phyiface_regs && ksp->link_irq == -1) {
ks8695_init_switch(ksp);
ksp->dtype = KS8695_DTYPE_LAN;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_ethtool_ops;
} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
ks8695_init_wan_phy(ksp);
ksp->dtype = KS8695_DTYPE_WAN;
- SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
} else {
/* No initialisation since HPNA does not have a PHY */
ksp->dtype = KS8695_DTYPE_HPNA;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_ethtool_ops;
}
/* And bring up the net_device with the net core */
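
The `SET_ETHTOOL_OPS()` conversions in this driver, and in ks8851, ksz884x, enc28j60, myri10ge, natsemi, ns83820, s2io, vxge, forcedeth, pch_gbe, hamachi, yellowfin, netxen and qla3xxx below, are part of the tree-wide removal of that macro in this cycle. It had been nothing more than a wrapper over the assignment, roughly:

    /* The retired wrapper added nothing over the assignment it hid: */
    #define SET_ETHTOOL_OPS(netdev, ops) \
            ((netdev)->ethtool_ops = (ops))

so assigning `ndev->ethtool_ops` directly is equivalent.
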
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e0c92e0e5e1d..66d4ab703f45 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -26,6 +26,8 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "ks8851.h"
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
* @vdd_reg: Optional regulator supplying the chip
+ * @vdd_io: Optional digital power supply for IO
+ * @gpio: Optional reset_n gpio
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
+ struct regulator *vdd_io;
+ int gpio;
};
static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
struct ks8851_net *ks;
int ret;
unsigned cider;
+ int gpio;
ndev = alloc_etherdev(sizeof(struct ks8851_net));
if (!ndev)
@@ -1417,20 +1424,53 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
- ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
- if (IS_ERR(ks->vdd_reg)) {
- ret = PTR_ERR(ks->vdd_reg);
- if (ret == -EPROBE_DEFER)
- goto err_reg;
- } else {
- ret = regulator_enable(ks->vdd_reg);
+ gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
+ 0, NULL);
+ if (gpio == -EPROBE_DEFER) {
+ ret = gpio;
+ goto err_gpio;
+ }
+
+ ks->gpio = gpio;
+ if (gpio_is_valid(gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, gpio,
+ GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
if (ret) {
- dev_err(&spi->dev, "regulator enable fail: %d\n",
- ret);
- goto err_reg_en;
+ dev_err(&spi->dev, "reset gpio request failed\n");
+ goto err_gpio;
}
}
+ ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(ks->vdd_io)) {
+ ret = PTR_ERR(ks->vdd_io);
+ goto err_reg_io;
+ }
+
+ ret = regulator_enable(ks->vdd_io);
+ if (ret) {
+ dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
+ ret);
+ goto err_reg_io;
+ }
+
+ ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ goto err_reg;
+ }
+
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
+ ret);
+ goto err_reg;
+ }
+
+ if (gpio_is_valid(gpio)) {
+ usleep_range(10000, 11000);
+ gpio_set_value(gpio, 1);
+ }
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1471,7 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
skb_queue_head_init(&ks->txq);
- SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+ ndev->ethtool_ops = &ks8851_ethtool_ops;
SET_NETDEV_DEV(ndev, &spi->dev);
spi_set_drvdata(spi, ks);
@@ -1527,13 +1567,14 @@ err_netdev:
free_irq(ndev->irq, ks);
err_irq:
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, 0);
err_id:
- if (!IS_ERR(ks->vdd_reg))
- regulator_disable(ks->vdd_reg);
-err_reg_en:
- if (!IS_ERR(ks->vdd_reg))
- regulator_put(ks->vdd_reg);
+ regulator_disable(ks->vdd_reg);
err_reg:
+ regulator_disable(ks->vdd_io);
+err_reg_io:
+err_gpio:
free_netdev(ndev);
return ret;
}
@@ -1547,18 +1588,24 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
- if (!IS_ERR(priv->vdd_reg)) {
- regulator_disable(priv->vdd_reg);
- regulator_put(priv->vdd_reg);
- }
+ if (gpio_is_valid(priv->gpio))
+ gpio_set_value(priv->gpio, 0);
+ regulator_disable(priv->vdd_reg);
+ regulator_disable(priv->vdd_io);
free_netdev(priv->netdev);
return 0;
}
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851" },
+ { }
+};
+
static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
+ .of_match_table = ks8851_match_table,
.owner = THIS_MODULE,
.pm = &ks8851_pm_ops,
},
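
Beyond the ethtool_ops change, ks8851_probe() gains an optional `vdd-io` supply and an optional active-low `reset-gpios` line, moves to `devm_*` acquisition (hence the dropped `regulator_put()` calls), and registers an of_match_table so the device can probe from DT. The bring-up order the new code implements: reset asserted (driven low) before the rails come up, deasserted only after a ~10 ms settle, with teardown in reverse. Condensed from the probe flow above into one hypothetical helper (`demo_power_up` is not a driver function; the gpio_is_valid() checks and full error unwinding are trimmed):

    static int demo_power_up(struct spi_device *spi, struct ks8851_net *ks)
    {
            int ret;

            /* Hold the chip in reset while power is applied. */
            ret = devm_gpio_request_one(&spi->dev, ks->gpio,
                                        GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
            if (ret)
                    return ret;

            ret = regulator_enable(ks->vdd_io);     /* IO rail first */
            if (ret)
                    return ret;

            ret = regulator_enable(ks->vdd_reg);    /* then core supply */
            if (ret) {
                    regulator_disable(ks->vdd_io);
                    return ret;
            }

            usleep_range(10000, 11000);             /* let the rails settle */
            gpio_set_value(ks->gpio, 1);            /* release reset */
            return 0;
    }
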
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 14ac0e2bc09f..064a48d0c368 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
* Only reset the hardware if time between calls is long
* enough.
*/
- if (jiffies - last_reset <= dev->watchdog_timeo)
+ if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
hw_priv = NULL;
}
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev = alloc_etherdev(sizeof(struct dev_priv));
if (!dev)
goto pcidev_init_reg_err;
+ SET_NETDEV_DEV(dev, &pdev->dev);
info->netdev[i] = dev;
priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
if (register_netdev(dev))
goto pcidev_init_reg_err;
port_set_power_saving(port, true);
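
The ksz884x watchdog test, like the two vxge interrupt-coalescing tests further down, is converted from a raw `jiffies` comparison to the `time_before_eq()`/`time_before()` helpers. A direct `jiffies > deadline` comparison gives the wrong answer once the counter wraps; the helpers compare via a signed subtraction, which stays correct across the wrap for intervals under half the counter range. They reduce to essentially this (the real versions in <linux/jiffies.h> add type checking; `demo_` names are illustrative):

    /* A signed difference orders correctly across a wrap. */
    #define demo_time_after(a, b)           ((long)((b) - (a)) < 0)
    #define demo_time_before(a, b)          demo_time_after(b, a)
    #define demo_time_before_eq(a, b)       (!demo_time_after(a, b))

    /* Usage matching the hunk above: too soon to reset again?
     *   if (demo_time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
     */
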
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index c7b40aa21f22..b1b5f66b8b69 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
dev->irq = spi->irq;
dev->netdev_ops = &enc28j60_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
+ dev->ethtool_ops = &enc28j60_ethtool_ops;
enc28j60_lowpower(priv, true);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 130f6b204efa..f3d5d79f1cd1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
- SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+ netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 64ec2a437f46..291fba8b9f07 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &natsemi_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
if (mtu)
dev->mtu = mtu;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index dbccf1de49ec..19bb8244b9e3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
ndev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ops);
+ ndev->ethtool_ops = &ops;
ndev->watchdog_timeo = 5 * HZ;
pci_set_drvdata(pci_dev, ndev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff322c4..be587647c706 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
netif_tx_start_all_queues(sp->dev);
}
-static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
- if (!sp->config.multiq)
- sp->mac_control.fifos[fifo_no].queue_state =
- FIFO_QUEUE_START;
-
- netif_tx_start_all_queues(sp->dev);
-}
-
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
@@ -5369,8 +5360,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, -1);
- info->duplex = -1;
+ ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
+ info->duplex = DUPLEX_UNKNOWN;
}
info->autoneg = AUTONEG_DISABLE;
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Driver entry points */
dev->netdev_ops = &s2io_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
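
s2io's ethtool get_settings, and vxge, forcedeth and pch_gbe below, stop reporting an unknown link as `-1` and use the named `SPEED_UNKNOWN`/`DUPLEX_UNKNOWN` constants instead. The old code worked by accident of representation: `ethtool_cmd.duplex` is a `__u8`, so `-1` was silently truncated to 0xff, which happens to be the value of DUPLEX_UNKNOWN, and SPEED_UNKNOWN is itself defined as -1. The named constants state the intent and survive any change of field width:

    /* Pattern now used when the link state cannot be determined: */
    if (netif_carrier_ok(dev)) {
            ethtool_cmd_speed_set(ecmd, SPEED_10000);
            ecmd->duplex = DUPLEX_FULL;
    } else {
            ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
            ecmd->duplex = DUPLEX_UNKNOWN;
    }
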
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 089b713b9f7b..2bbd01fcb9b0 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -120,7 +120,6 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
u32 i = 0;
- enum vxge_hw_status ret = VXGE_HW_FAIL;
udelay(10);
@@ -139,7 +138,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
mdelay(1);
} while (++i <= max_millis);
- return ret;
+ return VXGE_HW_FAIL;
}
static inline enum vxge_hw_status
@@ -1682,12 +1681,10 @@ enum vxge_hw_status vxge_hw_driver_stats_get(
struct __vxge_hw_device *hldev,
struct vxge_hw_device_stats_sw_info *sw_stats)
{
- enum vxge_hw_status status = VXGE_HW_OK;
-
memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
sizeof(struct vxge_hw_device_stats_sw_info));
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -3228,7 +3225,6 @@ enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
int i = 0, j = 0;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -3241,7 +3237,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
return VXGE_HW_FAIL;
}
}
- return status;
+ return VXGE_HW_OK;
}
/*
* vxge_hw_mgmt_reg_Write - Write Titan register.
@@ -3979,7 +3975,6 @@ __vxge_hw_vpath_mgmt_read(
{
u32 i, mtu = 0, max_pyld = 0;
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
@@ -4009,7 +4004,7 @@ __vxge_hw_vpath_mgmt_read(
else
VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4039,14 +4034,13 @@ static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg0);
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4227,7 +4221,6 @@ static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vp_config *vp_config;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4283,7 +4276,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
writeq(val64, &vp_reg->rxmac_vcfg1);
}
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4295,7 +4288,6 @@ static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
struct vxge_hw_vp_config *config;
@@ -4545,7 +4537,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
writeq(val64, &vp_reg->tim_wrkld_clc);
- return status;
+ return VXGE_HW_OK;
}
/*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index f8f073880f84..b07d552a27d4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -62,8 +62,8 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, -1);
- info->duplex = -1;
+ ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
+ info->duplex = DUPLEX_UNKNOWN;
}
info->autoneg = AUTONEG_DISABLE;
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
- SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
+ ndev->ethtool_ops = &vxge_ethtool_ops;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcbb8543..7a0deadd53bf 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
fifo->interrupt_count++;
- if (jiffies > fifo->jiffies + HZ / 100) {
+ if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_fifo *hw_fifo = fifo->handle;
fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
{
ring->interrupt_count++;
- if (jiffies > ring->jiffies + HZ / 100) {
+ if (time_before(ring->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_ring *hw_ring = ring->handle;
ring->jiffies = jiffies;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index fddb464aeab3..9afc536c5734 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -406,7 +406,7 @@ union ring_type {
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
-#define NV_RX_SUBSTRACT1 (1<<18)
+#define NV_RX_SUBTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
@@ -423,7 +423,7 @@ union ring_type {
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
-#define NV_RX2_SUBSTRACT1 (1<<25)
+#define NV_RX2_SUBTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
@@ -2832,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
- if (flags & NV_RX_SUBSTRACT1)
+ if (flags & NV_RX_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -2863,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1)
+ if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -2937,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1)
+ if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -4285,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->duplex)
ecmd->duplex = DUPLEX_FULL;
} else {
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ethtool_cmd_speed_set(ecmd, speed);
ecmd->autoneg = np->autoneg;
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->netdev_ops = &nv_netdev_ops_optimized;
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
pci_set_drvdata(pci_dev, dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 422d9b51ac24..8706c0dbd0c3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1361,7 +1361,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_eth_clock_enable(pldat, true);
/* Map IO space */
- pldat->net_base = ioremap(res->start, res->end - res->start + 1);
+ pldat->net_base = ioremap(res->start, resource_size(res));
if (!pldat->net_base) {
dev_err(&pdev->dev, "failed to map registers\n");
ret = -ENOMEM;
@@ -1417,10 +1417,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
pldat->dma_buff_base_p = dma_handle;
- netdev_dbg(ndev, "IO address start :0x%08x\n",
- res->start);
- netdev_dbg(ndev, "IO address size :%d\n",
- res->end - res->start + 1);
+ netdev_dbg(ndev, "IO address space :%pR\n", res);
+ netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
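
lpc_eth replaces the open-coded `res->end - res->start + 1` with `resource_size(res)` and prints the range with the `%pR` resource format specifier. Since `struct resource` ranges are inclusive at both ends, the `+ 1` is mandatory and easy to drop when hand-rolled; the helper is essentially (`demo_` name is illustrative):

    /* As in <linux/ioport.h>: [start, end] is an inclusive range. */
    static inline resource_size_t demo_resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;
    }
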
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index a588ffde9700..44c8be1c6805 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
config PCH_GBE
tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
- depends on PCI && (X86 || COMPILE_TEST)
+ depends on PCI && (X86_32 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 826f0ccdc23c..4fe8ea96bd25 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -91,7 +91,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
if (!netif_carrier_ok(adapter->netdev))
- ethtool_cmd_speed_set(ecmd, -1);
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
return ret;
}
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
void pch_gbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+ netdev->ethtool_ops = &pch_gbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index b6bdeb3c1971..9a997e4c3e08 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
/* The Hamachi-specific entries in the device structure. */
dev->netdev_ops = &hamachi_netdev_ops;
- if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
- else
- SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+ dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
+ &ethtool_ops : &ethtool_ops_no_mii;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 9a6cb482dcd0..69a8dc095072 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
/* The Yellowfin-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd3116e45..d49cba129081 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
Say Y here if you want to enable hardware offload support for
Virtual eXtensible Local Area Network (VXLAN) in the driver.
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ ---help---
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
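The help text refers to the hwmon sysfs interface: with CONFIG_QLCNIC_HWMON enabled, the board temperature is exposed as a temp1_input attribute in millidegrees Celsius. A small userspace sketch; the hwmon0 index is an assumption, since it depends on probe order and should really be matched via the device's "name" attribute:

	#include <stdio.h>

	int main(void)
	{
		/* hwmon0 is hypothetical; match the device by its "name" file */
		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
		long mdeg;

		if (!f) {
			perror("temp1_input");
			return 1;
		}
		if (fscanf(f, "%ld", &mdeg) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("board temperature: %.1f C\n", mdeg / 1000.0);
		return 0;
	}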
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f09c35d669b3..5bf05818a12c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
netxen_nic_change_mtu(netdev, netdev->mtu);
- SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+ netdev->ethtool_ops = &netxen_nic_ethtool_ops;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2eabd44f8914..b5d6bc1a8b00 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
/* Set driver entry points */
ndev->netdev_ops = &ql3xxx_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+ ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01c7d12..be618b9e874f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 57
-#define QLCNIC_LINUX_VERSIONID "5.3.57"
+#define _QLCNIC_LINUX_SUBVERSION 60
+#define QLCNIC_LINUX_VERSIONID "5.3.60"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -441,6 +441,8 @@ struct qlcnic_82xx_dump_template_hdr {
u32 rsvd1[0];
};
+#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
bool enable; /* enable/disable dump */
@@ -537,6 +539,7 @@ struct qlcnic_hardware_context {
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
u16 vxlan_port;
+ struct device *hwmon_dev;
};
struct qlcnic_adapter_stats {
@@ -1018,6 +1021,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_DEL_VXLAN_PORT 0x200000
#endif
+#define QLCNIC_VLAN_FILTERING 0x800000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1316,6 +1321,7 @@ struct qlcnic_eswitch {
#define QL_STATUS_INVALID_PARAM -1
#define MAX_BW 100 /* % of link speed */
+#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
#define DEFAULT_MAC_LEARN 1
@@ -1692,7 +1698,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
u16 *);
@@ -2338,6 +2344,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
}
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+ bool status;
+
+ status = (qlcnic_sriov_pf_check(adapter) ||
+ qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+ return status;
+}
+
static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
{
if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2361,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
else
return QLC_DEFAULT_VNIC_COUNT;
}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb46a75d..a4a4ec0b68f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI functions 8 and higher are not supported with legacy interrupts, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
qlcnic_83xx_enable_legacy(adapter);
+ }
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
return 0;
}
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
return -EINVAL;
}
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
}
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
u32 *data, u32 count)
{
int i, j, ret = 0;
u32 temp;
- int err = 0;
/* Check alignment */
if (addr & 0xF)
return -EIO;
mutex_lock(&adapter->ahw->mem_lock);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
for (i = 0; i < count; i++, addr += 16) {
if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
return -EIO;
}
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_ENABLE);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
- if (err == -EIO) {
- mutex_unlock(&adapter->ahw->mem_lock);
- return err;
- }
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if ((temp & TA_CTL_BUSY) == 0)
break;
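The rewrite above funnels the 83xx indirect accesses through qlcnic_ind_wr(), whose int return (added in the header change below) lets callers see write failures the old void helper swallowed. A self-contained sketch of the dispatch-and-propagate shape, with hypothetical names standing in for the per-generation accessors:

	#include <linux/types.h>

	struct demo_adapter { bool is_82xx; };

	/* 82xx window write: cannot fail */
	static void demo_82xx_write_window(struct demo_adapter *a, u32 addr, u32 data)
	{ }

	/* 83xx indirect write: may fail (e.g. -EIO) on real hardware */
	static int demo_83xx_wrt_reg_indirect(struct demo_adapter *a, u32 addr, u32 data)
	{ return 0; }

	static int demo_ind_wr(struct demo_adapter *adapter, u32 addr, u32 data)
	{
		int ret = 0;

		if (adapter->is_82xx)
			demo_82xx_write_window(adapter, addr, data);
		else
			ret = demo_83xx_wrt_reg_indirect(adapter, addr, data);

		return ret;	/* propagated instead of dropped */
	}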
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c35633..2bf101a47d02 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -418,7 +418,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
-#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
@@ -560,7 +559,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +616,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +657,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
u32 qlcnic_83xx_get_cap_size(void *, int);
void qlcnic_83xx_set_sys_info(void *, int, u32);
void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c721ee97..f33559b72528 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
return ret;
}
/* 16 byte write to MS memory */
- ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
- size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
if (ret) {
vfree(p_cache);
return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
p_cache = (u32 *)fw->data;
addr = (u64)dest;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- p_cache, size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- (u32 *)data, 1);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
max_sds_rings = QLCNIC_MAX_SDS_RINGS;
max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5715b0..304e247bdf33 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
u32 arg1;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
u32 arg1, arg2 = 0;
u8 pci_func;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
+
pci_func = esw_cfg->pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg1 &= ~(0x0ffff << 16);
break;
default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5bacf5210aed..1b7f3dbae289 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -726,6 +726,11 @@ static int qlcnic_set_channels(struct net_device *dev,
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+ return -EINVAL;
+ }
+
if (channel->other_count || channel->combined_count)
return -EINVAL;
@@ -734,7 +739,7 @@ static int qlcnic_set_channels(struct net_device *dev,
if (err)
return err;
- if (channel->rx_count) {
+ if (adapter->drv_sds_rings != channel->rx_count) {
err = qlcnic_validate_rings(adapter, channel->rx_count,
QLCNIC_RX_QUEUE);
if (err) {
@@ -745,7 +750,7 @@ static int qlcnic_set_channels(struct net_device *dev,
adapter->drv_rss_rings = channel->rx_count;
}
- if (channel->tx_count) {
+ if (adapter->drv_tx_rings != channel->tx_count) {
err = qlcnic_validate_rings(adapter, channel->tx_count,
QLCNIC_TX_QUEUE);
if (err) {
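After this change qlcnic_set_channels() rejects RSS/TSS reconfiguration outright when MSI-X is off (the checks removed from qlcnic_validate_rings() further down) and only validates a ring count that actually differs from the current one. A condensed sketch of the resulting policy, with illustrative parameters in place of the adapter state:

	#include <linux/ethtool.h>
	#include <linux/log2.h>

	static int demo_validate_channels(struct ethtool_channels *ch,
					  u32 cur_rx, u32 cur_tx, bool have_msix)
	{
		if (!have_msix)
			return -EINVAL;		/* no RSS/TSS in non-MSI-X mode */
		if (ch->other_count || ch->combined_count)
			return -EINVAL;
		if (ch->rx_count != cur_rx && !is_power_of_2(ch->rx_count))
			return -EINVAL;		/* only re-validate real changes */
		if (ch->tx_count != cur_tx && !is_power_of_2(ch->tx_count))
			return -EINVAL;
		return 0;
	}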
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4e70b5..851cb4a80d50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
return data;
}
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
+ int ret = 0;
+
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
else
- qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
}
static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_vlan_list *cur;
- struct netdev_hw_addr *ha;
- size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter)) {
- if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(ha, netdev) {
- temp = sizeof(struct qlcnic_mac_vlan_list);
- cur = kzalloc(temp, GFP_ATOMIC);
- if (cur == NULL)
- break;
- memcpy(cur->mac_addr,
- ha->addr, ETH_ALEN);
- list_add_tail(&cur->list, &adapter->vf_mc_list);
- }
- }
- qlcnic_sriov_vf_schedule_multi(adapter->netdev);
- return;
- }
- __qlcnic_set_multi(netdev, 0);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
}
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *n;
struct hlist_head *head;
int i;
- unsigned long time;
+ unsigned long expires;
u8 cmd;
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
qlcnic_sre_macaddr_change(adapter,
tmp_fil->faddr,
tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
{
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
spin_lock_bh(&adapter->rx_mac_learn_lock);
adapter->rx_fhash.fnum--;
hlist_del(&tmp_fil->fnode);
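The filter-aging hunks swap a raw `jiffies > deadline` comparison, which inverts when jiffies wraps, for the wrap-safe time_before() macro from <linux/jiffies.h>. The idiom in isolation:

	#include <linux/jiffies.h>

	static bool demo_filter_expired(unsigned long ftime, unsigned long age_sec)
	{
		unsigned long expires = ftime + age_sec * HZ;

		/* time_before() compares via signed arithmetic, so the
		 * ordering stays correct across a jiffies wrap-around. */
		return time_before(expires, jiffies);
	}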
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d12991f..e45bf09af0c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
- struct net_device *netdev = adapter->netdev;
u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
u16 vlan_id = 0;
u8 hindex, hval;
- if (!qlcnic_sriov_pf_check(adapter)) {
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
- } else {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
vlan_id = ntohs(vh->h_vlan_TCI);
} else if (vlan_tx_tag_present(skb)) {
vlan_id = vlan_tx_tag_get(skb);
}
-
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
- !vlan_id)
- return;
- }
-
- if (adapter->fhash.fnum >= adapter->fhash.fmax) {
- adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
- adapter->fhash.fmax, adapter->fhash.fnum);
- return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
}
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
if (!fil)
return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88a81bf..4fc186713b66 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
if (is_unicast_ether_addr(addr)) {
err = dev_uc_del(netdev, addr);
if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
return -EOPNOTSUPP;
}
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
return idx;
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
- .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
.ndo_get_vf_config = qlcnic_sriov_get_vf_config,
.ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
.ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
adapter->msix_entries[vector].entry = vector;
restore:
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err > 0) {
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
- return -ENOSPC;
+ return err;
netdev_info(adapter->netdev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
}
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_delete_lb_filters(adapter);
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
qlcnic_napi_disable(adapter);
@@ -2052,6 +2057,7 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
goto err_out;
}
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -2069,12 +2087,20 @@ err_out:
static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
kfree(adapter->recv_ctx);
adapter->recv_ctx = NULL;
- if (adapter->ahw->fw_dump.tmpl_hdr) {
- vfree(adapter->ahw->fw_dump.tmpl_hdr);
- adapter->ahw->fw_dump.tmpl_hdr = NULL;
+ if (fw_dump->tmpl_hdr) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ }
+
+ if (fw_dump->dma_buffer) {
+ dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+ fw_dump->dma_buffer, fw_dump->phys_addr);
+ fw_dump->dma_buffer = NULL;
}
kfree(adapter->ahw->reset.buff);
@@ -2247,10 +2273,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
qlcnic_change_mtu(netdev, netdev->mtu);
- if (qlcnic_sriov_vf_check(adapter))
- SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
- else
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+ netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
@@ -2417,9 +2441,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- if (pdev->is_virtfn)
- return -ENODEV;
-
err = pci_enable_device(pdev);
if (err)
return err;
@@ -2552,9 +2573,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
default:
- dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
- dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
goto err_out_maintenance_mode;
}
}
@@ -2628,7 +2651,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_add_sysfs(adapter);
-
+ qlcnic_register_hwmon_dev(adapter);
return 0;
err_out_disable_mbx_intr:
@@ -2665,7 +2688,7 @@ err_out_disable_pdev:
err_out_maintenance_mode:
set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
ahw->port_type = QLCNIC_XGBE;
if (qlcnic_83xx_check(adapter))
@@ -2698,9 +2721,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
- qlcnic_sriov_pf_disable(adapter);
qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
ahw = adapter->ahw;
unregister_netdev(netdev);
@@ -2735,6 +2758,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_remove_sysfs(adapter);
+ qlcnic_unregister_hwmon_dev(adapter);
+
qlcnic_cleanup_pci_map(adapter->ahw);
qlcnic_release_firmware(adapter);
@@ -2828,6 +2853,8 @@ static int qlcnic_close(struct net_device *netdev)
return 0;
}
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
@@ -2843,7 +2870,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
- if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
filter_size = QLCNIC_LB_MAX_FILTERS;
adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
} else {
@@ -3973,16 +4003,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
strcpy(buf, "Tx");
}
- if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
- netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
- return -EINVAL;
- }
-
- if (adapter->flags & QLCNIC_MSI_ENABLED) {
- netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -4122,7 +4142,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
- dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
+ dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
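The probe path now uses pci_enable_msix_exact(), which either enables exactly the requested number of vectors (returning 0) or fails, with -ENOSPC signalling that fewer vectors are available; unlike the old pci_enable_msix(), it never returns the available count as a positive value. A sketch of the calling convention:

	#include <linux/pci.h>

	static int demo_enable_msix(struct pci_dev *pdev,
				    struct msix_entry *entries, int nvec)
	{
		int err = pci_enable_msix_exact(pdev, entries, nvec);

		if (err == -ENOSPC)	/* fewer than nvec vectors available */
			dev_info(&pdev->dev,
				 "could not allocate %d MSI-X vectors\n", nvec);

		return err;		/* 0 on success, negative errno otherwise */
	}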
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b1266b..e46fc39d425d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
}
inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
hdr->saved_state[index] = value;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
{
struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
}
inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -653,34 +660,31 @@ out:
#define QLC_DMA_CMD_BUFF_ADDR_HI 4
#define QLC_DMA_CMD_STATUS_CTRL 8
-#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
-
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
+ void *tmpl_hdr;
tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
- dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->desc_card_addr);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->start_dma_cmd);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
if (ret)
return ret;
@@ -710,15 +714,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
+ void *tmpl_hdr;
tmpl_hdr = fw_dump->tmpl_hdr;
/* Check if DMA engine is available */
- temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
temp = qlcnic_ind_rd(adapter,
dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +769,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
/* Write DMA descriptor to MS memory*/
temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
- *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
- (u32 *)dma_descr, temp);
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
if (*ret) {
dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
mem->desc_card_addr);
@@ -1141,8 +1146,6 @@ free_mem:
return err;
}
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw;
@@ -1150,6 +1153,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
u32 temp_size = 0;
+ void *temp_buffer;
int err;
ahw = adapter->ahw;
@@ -1199,16 +1203,23 @@ flash_temp:
qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+ if (fw_dump->use_pex_dma) {
+ fw_dump->dma_buffer = NULL;
+ temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ QLC_PEX_DMA_READ_SIZE,
+ &fw_dump->phys_addr,
+ GFP_KERNEL);
+ if (!temp_buffer)
+ fw_dump->use_pex_dma = false;
+ else
+ fw_dump->dma_buffer = temp_buffer;
+ }
+
dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
fw_dump->cap_mask);
- if (qlcnic_83xx_check(adapter) &&
- (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
- fw_dump->use_pex_dma = true;
- else
- fw_dump->use_pex_dma = false;
-
qlcnic_enable_fw_dump_state(adapter);
return 0;
@@ -1224,7 +1235,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct qlcnic_hardware_context *ahw;
struct qlcnic_dump_entry *entry;
- void *temp_buffer, *tmpl_hdr;
+ void *tmpl_hdr;
u32 ocm_window;
__le32 *buffer;
char mesg[64];
@@ -1268,16 +1279,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
- if (fw_dump->use_pex_dma) {
- temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
- &fw_dump->phys_addr,
- GFP_KERNEL);
- if (!temp_buffer)
- fw_dump->use_pex_dma = false;
- else
- fw_dump->dma_buffer = temp_buffer;
- }
-
if (qlcnic_82xx_check(adapter)) {
ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1335,10 +1336,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
- if (fw_dump->use_pex_dma)
- dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
- fw_dump->dma_buffer, fw_dump->phys_addr);
-
return 0;
}
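These hunks move the PEX DMA bounce buffer from a per-dump dma_alloc_coherent() in qlcnic_dump_fw() to a single allocation when the minidump template is fetched, with the matching free relocated to qlcnic_free_adapter_resources() (see the qlcnic_main.c hunk earlier). The allocate-once shape, as a sketch with hypothetical names:

	#include <linux/dma-mapping.h>

	static int demo_alloc_dump_buffer(struct device *dev, size_t size,
					  void **buf, dma_addr_t *phys)
	{
		*buf = dma_alloc_coherent(dev, size, phys, GFP_KERNEL);

		return *buf ? 0 : -ENOMEM;	/* caller may fall back to PIO */
	}

	static void demo_free_dump_buffer(struct device *dev, size_t size,
					  void *buf, dma_addr_t phys)
	{
		if (buf)	/* freed once, at adapter teardown */
			dma_free_coherent(dev, size, buf, phys);
	}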
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1fd1d27..4677b2edccca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
};
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
#define QLC_BC_CMD 1
struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
- struct mutex vlan_list_lock; /* Lock for VLAN list */
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
struct list_head list;
struct work_struct work;
void *ptr;
+ struct qlcnic_cmd_args *cmd;
};
struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1f5ab9..1659c804f1d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
- mutex_init(&vf->vlan_list_lock);
+ spin_lock_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
goto qlcnic_destroy_async_wq;
}
sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
vp->max_tx_bw = MAX_BW;
- vp->spoofchk = true;
+ vp->min_tx_bw = MIN_BW;
+ vp->spoofchk = false;
random_ether_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
@@ -454,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
struct qlcnic_cmd_args cmd;
int ret = 0;
+ memset(&cmd, 0, sizeof(cmd));
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
return ret;
@@ -515,6 +520,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
{
int err;
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
@@ -770,6 +777,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
cmd->req.arg = (u32 *)trans->req_pay;
cmd->rsp.arg = (u32 *)trans->rsp_pay;
cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
remainder = (trans->rsp_pay_size) % (bc_pay_sz);
num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
if (remainder)
@@ -1356,7 +1364,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1416,17 @@ retry:
(mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
rsp = QLCNIC_RCODE_SUCCESS;
} else {
- rsp = mbx_err_code;
- if (!rsp)
- rsp = 1;
- dev_err(dev,
- "MBX command 0x%x failed with err:0x%x for VF %d\n",
- opcode, mbx_err_code, func);
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
}
err_out:
@@ -1435,12 +1448,23 @@ free_cmd:
return rsp;
}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
int ret;
+ memset(&cmd, 0, sizeof(cmd));
if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
return -ENOMEM;
@@ -1465,58 +1489,28 @@ out:
return ret;
}
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- struct qlcnic_mac_vlan_list *cur;
- struct list_head *head, tmp_list;
struct qlcnic_vf_info *vf;
u16 vlan_id;
int i;
- static const u8 bcast_addr[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
vf = &adapter->ahw->sriov->vf_info[0];
- INIT_LIST_HEAD(&tmp_list);
- head = &adapter->vf_mc_list;
- netif_addr_lock_bh(netdev);
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
- list_move(&cur->list, &tmp_list);
- }
-
- netif_addr_unlock_bh(netdev);
-
- while (!list_empty(&tmp_list)) {
- cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_vlan_list, list);
- if (!qlcnic_sriov_check_any_vlan(vf)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- } else {
- mutex_lock(&vf->vlan_list_lock);
- for (i = 0; i < sriov->num_allowed_vlans; i++) {
- vlan_id = vf->sriov_vlans[i];
- if (vlan_id) {
- qlcnic_nic_add_mac(adapter, bcast_addr,
- vlan_id);
- qlcnic_nic_add_mac(adapter,
- cur->mac_addr,
- vlan_id);
- }
- }
- mutex_unlock(&vf->vlan_list_lock);
- if (qlcnic_84xx_check(adapter)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- }
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id);
}
- list_del(&cur->list);
- kfree(cur);
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0);
}
}
@@ -1525,6 +1519,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
struct list_head *head = &bc->async_list;
struct qlcnic_async_work_list *entry;
+ flush_workqueue(bc->bc_async_wq);
while (!list_empty(head)) {
entry = list_entry(head->next, struct qlcnic_async_work_list,
list);
@@ -1534,10 +1529,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
}
}
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1548,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr);
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev);
+ /* Configure unicast MAC addresses; if there is not enough space
+ * to store all of them, enable promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
+
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+ }
qlcnic_nic_set_promisc(adapter, mode);
}
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
struct qlcnic_async_work_list *entry;
- struct net_device *netdev;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args *cmd;
entry = container_of(work, struct qlcnic_async_work_list, work);
- netdev = (struct net_device *)entry->ptr;
-
- qlcnic_sriov_vf_set_multi(netdev);
+ adapter = entry->ptr;
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
@@ -1595,8 +1620,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
return entry;
}
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
- work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data,
+ struct qlcnic_cmd_args *cmd)
{
struct qlcnic_async_work_list *entry = NULL;
@@ -1605,21 +1631,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
return;
entry->ptr = data;
+ entry->cmd = cmd;
INIT_WORK(&entry->work, func);
queue_work(bc->bc_async_wq, &entry->work);
}
-void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
if (adapter->need_fw_reset)
- return;
+ return -EIO;
- qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
- netdev);
+ qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+ adapter, cmd);
+ return 0;
}
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1843,6 +1871,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
return 0;
}
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
struct qlcnic_adapter *adapter;
@@ -1874,6 +1908,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
}
idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
idc->delay);
@@ -1897,7 +1933,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
if (!vf->sriov_vlans)
return err;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
for (i = 0; i < sriov->num_allowed_vlans; i++) {
if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1942,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1915,12 +1951,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
{
int err = 0;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan >= sriov->num_allowed_vlans)
err = -EINVAL;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1973,7 +2009,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
if (!vf->sriov_vlans)
return;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
switch (opcode) {
case QLC_VLAN_ADD:
@@ -1986,7 +2022,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
netdev_err(adapter->netdev, "Invalid VLAN operation\n");
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return;
}
@@ -1994,10 +2030,12 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
+ memset(&cmd, 0, sizeof(cmd));
if (vid == 0)
return 0;
@@ -2019,14 +2057,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
dev_err(&adapter->pdev->dev,
"Failed to configure guest VLAN, err=%d\n", ret);
} else {
+ netif_addr_lock_bh(netdev);
qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
if (enable)
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_set_multi(adapter->netdev);
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2199,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
bool err = false;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan)
err = true;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
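The mutex-to-spinlock conversion above follows from the new callers: the VLAN list is now walked under netif_addr_lock_bh() in the set_multi path, a context that may not sleep, so the _bh spinlock variants are used throughout. The locking shape in isolation:

	#include <linux/spinlock.h>

	struct demo_vf {
		spinlock_t vlan_list_lock;	/* was a mutex */
		int num_vlan;
	};

	static bool demo_vf_has_vlan(struct demo_vf *vf)
	{
		bool ret;

		spin_lock_bh(&vf->vlan_list_lock);	/* safe in BH context */
		ret = vf->num_vlan != 0;
		spin_unlock_bh(&vf->vlan_list_lock);

		return ret;
	}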
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 280137991544..a29538b86edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_ques = res->num_tx_queues / max;
if (qlcnic_83xx_pf_check(adapter))
- num_macs = 1;
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
cmd.req.arg[1] = 0x4;
if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
cmd.req.arg[1] |= BIT_16;
if (qlcnic_84xx_check(adapter))
cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
return -EPERM;
}
+ qlcnic_sriov_pf_disable(adapter);
+
rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
- qlcnic_sriov_pf_disable(adapter);
-
qlcnic_sriov_free_vlans(adapter);
qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
qlcnic_sriov_alloc_vlans(adapter);
- err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
__qlcnic_down(adapter, netdev);
err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
- if (err) {
- netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
- adapter->portnum);
+ if (err)
+ goto error;
- err = -EIO;
- if (qlcnic_83xx_configure_opmode(adapter))
- goto error;
- } else {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
netdev_info(netdev,
"SR-IOV is enabled successfully on port %d\n",
adapter->portnum);
/* Return number of vfs enabled */
- err = num_vfs;
+ return num_vfs;
}
+
+ rtnl_lock();
if (netif_running(netdev))
- __qlcnic_up(adapter, netdev);
+ __qlcnic_down(adapter, netdev);
error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
return err;
}
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
u16 vlan, u8 op)
{
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd;
struct qlcnic_macvlan_mbx mv;
struct qlcnic_vport *vp;
u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
vp = vf->vp;
- if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
return -ENOMEM;
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
- goto out;
+ goto free_args;
}
if (vlan)
op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
- cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
- cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
addr = vp->mac;
mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
+ if (!err)
+ return err;
-out:
- qlcnic_free_mbx_args(&cmd);
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
return err;
}
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
sriov = adapter->ahw->sriov;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan) {
for (i = 0; i < sriov->num_allowed_vlans; i++) {
vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
opcode);
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
if (vf->vp->vlan_mode != QLC_PVID_MODE) {
if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
{
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
ctx_id = cmd->req.arg[1] >> 16;
pkts = cmd->req.arg[2] & 0xffff;
time = cmd->req.arg[2] >> 16;
- if (ctx_id != vf->rx_ctx_id)
- return -EINVAL;
- if (pkts > coal->rx_packets)
- return -EINVAL;
- if (time < coal->rx_time_us)
- return -EINVAL;
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
}
static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_macvlan_mbx *macvlan;
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] |= (vf->vp->handle << 16);
cmd->req.arg[1] |= BIT_31;
- macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
- if (!(macvlan->mac_addr0 & BIT_0)) {
- dev_err(&adapter->pdev->dev,
- "MAC address change is not allowed from VF %d",
- vf->pci_func);
- return -EINVAL;
- }
-
if (vp->vlan_mode == QLC_PVID_MODE) {
op = cmd->req.arg[1] & 0x7;
cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return 0;
}
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
if (vf >= sriov->num_vfs)
return -EINVAL;
- if (tx_rate >= 10000 || tx_rate < 100) {
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (!min_tx_rate)
+ min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+ if (max_tx_rate &&
+ (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
netdev_err(netdev,
- "Invalid Tx rate, allowed range is [%d - %d]",
- QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
+ "Invalid max Tx rate, allowed range is [%d - %d]",
+ min_tx_rate, QLC_VF_MAX_TX_RATE);
return -EINVAL;
}
- if (tx_rate == 0)
- tx_rate = 10000;
+ if (!max_tx_rate)
+ max_tx_rate = 10000;
- vf_info = &sriov->vf_info[vf];
- vp = vf_info->vp;
- vpid = vp->handle;
+ if (min_tx_rate &&
+ (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ netdev_err(netdev,
+ "Invalid min Tx rate, allowed range is [%d - %d]",
+ QLC_VF_MIN_TX_RATE, max_tx_rate);
+ return -EINVAL;
+ }
if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
return -EIO;
- nic_info.max_tx_bw = tx_rate / 100;
+ nic_info.max_tx_bw = max_tx_rate / 100;
+ nic_info.min_tx_bw = min_tx_rate / 100;
nic_info.bit_offsets = BIT_0;
if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
return -EIO;
}
- vp->max_tx_bw = tx_rate / 100;
+ vp->max_tx_bw = max_tx_rate / 100;
netdev_info(netdev,
- "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
- tx_rate, vp->max_tx_bw, vf);
+ "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ max_tx_rate, vp->max_tx_bw, vf);
+ vp->min_tx_bw = min_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ min_tx_rate, vp->min_tx_bw, vf);
return 0;
}
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
ivi->qos = vp->qos;
ivi->spoofchk = vp->spoofchk;
if (vp->max_tx_bw == MAX_BW)
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = 0;
+ else
+ ivi->max_tx_rate = vp->max_tx_bw * 100;
+ if (vp->min_tx_bw == MIN_BW)
+ ivi->min_tx_rate = 0;
else
- ivi->tx_rate = vp->max_tx_bw * 100;
+ ivi->min_tx_rate = vp->min_tx_bw * 100;
ivi->vf = vf;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e27f2e1..f5786d5792df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#define QLC_STATUS_UNSUPPORTED_CMD -2
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
if (adapter->npars[i].pci_func == pci_func)
return i;
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
return -EINVAL;
}
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
.write = qlcnic_83xx_sysfs_flash_write_handler,
};
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+ /* display millidegree Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
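The CONFIG_QLCNIC_HWMON block above is a textbook use of the groups-based hwmon API that replaced hand-rolled sysfs file creation. A minimal self-contained sketch of the pattern, with placeholder names and a faked temperature reading (a real driver reads a device register instead):

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static ssize_t demo_show_temp(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* placeholder value; a real driver reads hardware here */
	return sprintf(buf, "%u\n", 42 * 1000);	/* millidegrees Celsius */
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 1);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);

static int demo_register_hwmon(struct device *parent, void *drvdata)
{
	struct device *hwmon;

	hwmon = hwmon_device_register_with_groups(parent, "demo", drvdata,
						  demo_groups);
	return PTR_ERR_OR_ZERO(hwmon);
}

The reading then surfaces as /sys/class/hwmon/hwmonN/temp1_input, which is why the show handler scales to millidegrees.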
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76acab81..b40050e03a56 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
}
return status;
err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
ql_free_irq(qdev);
return status;
}
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->irq = pdev->irq;
ndev->netdev_ops = &qlge_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+ ndev->ethtool_ops = &qlge_ethtool_ops;
ndev->watchdog_timeo = 10 * HZ;
err = register_netdev(ndev);
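The SET_ETHTOOL_OPS() conversions that recur throughout this merge are mechanical: the macro, removed from netdevice.h in this cycle, was essentially nothing more than

#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

so every call site collapses to a plain assignment:

	ndev->ethtool_ops = &qlge_ethtool_ops;	/* was: SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops) */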
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index aa1c079f231d..be425ad5e824 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->ethtool_ops = &rtl8169_ethtool_ops;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a9509ccd33b..7622213beef1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0004,
+ [EDRRR] = 0x0008,
+ [TDLAR] = 0x000c,
+ [RDLAR] = 0x0010,
+ [EESR] = 0x0014,
+ [EESIPR] = 0x0018,
+ [TRSCER] = 0x001c,
+ [RMFCR] = 0x0020,
+ [TFTR] = 0x0024,
+ [FDR] = 0x0028,
+ [RMCR] = 0x002c,
+ [EDOCR] = 0x0030,
+ [FCFTR] = 0x0034,
+ [RPADIR] = 0x0038,
+ [TRIMD] = 0x003c,
+ [RBWAR] = 0x0040,
+ [RDFAR] = 0x0044,
+ [TBRAR] = 0x004c,
+ [TDFAR] = 0x0050,
+
[ECMR] = 0x0160,
[ECSR] = 0x0164,
[ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- .rmcr_value = RMCR_RNC,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000072f,
- .rmcr_value = RMCR_RNC,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = RMCR_RNC,
.apr = 1,
.mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = RMCR_RNC,
.no_psr = 1,
.apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->fdr_value)
cd->fdr_value = DEFAULT_FDR_INIT;
- if (!cd->rmcr_value)
- cd->rmcr_value = DEFAULT_RMCR_VALUE;
-
if (!cd->tx_check)
cd->tx_check = DEFAULT_TX_CHECK;
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
sh_eth_write(ndev, 0, TFTR);
- /* Frame recv control */
- sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
+ /* Frame recv control (enable multiple-packets per rx irq) */
+ sh_eth_write(ndev, RMCR_RNC, RMCR);
sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
@@ -1385,7 +1399,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
int entry = mdp->cur_rx % mdp->num_rx_ring;
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
- int exceeded = 0;
u16 pkt_len = 0;
u32 desc_status;
@@ -1397,10 +1410,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (--boguscnt < 0)
break;
- if (*quota <= 0) {
- exceeded = 1;
+ if (*quota <= 0)
break;
- }
+
(*quota)--;
if (!(desc_status & RDFEND))
@@ -1448,7 +1460,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
- rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
@@ -1494,7 +1505,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
- return exceeded;
+ return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
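Returning *quota <= 0 lets the caller see budget exhaustion directly instead of through the removed exceeded flag. For context, the NAPI contract this return value feeds looks roughly like the following sketch (demo_* helpers are hypothetical):

static int demo_poll(struct napi_struct *napi, int budget)
{
	int quota = budget;

	/* rx path decrements quota per delivered packet, stops at zero */
	demo_rx(napi, &quota);

	if (quota > 0) {		/* ring drained within budget */
		napi_complete(napi);
		demo_enable_rx_irq(napi);
	}
	return budget - quota;		/* packets actually processed */
}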
@@ -2627,8 +2638,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
pdev->name, pdev->id);
/* PHY IRQ */
- mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
- GFP_KERNEL);
+ mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
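devm_kmalloc_array() is the overflow-checked spelling of the open-coded sizeof(int) * PHY_MAX_ADDR multiply, and the move from a zeroing to a non-zeroing allocation is presumably safe because every slot is written before use. In isolation the pattern is (sketch, hypothetical wrapper):

static int demo_alloc_irq_table(struct device *dev, int **table)
{
	int *irqs = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(*irqs),
				       GFP_KERNEL);

	if (!irqs)	/* also taken if the count * size multiply overflows */
		return -ENOMEM;
	*table = irqs;
	return 0;
}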
@@ -2843,7 +2854,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
else
ndev->netdev_ops = &sh_eth_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+ ndev->ethtool_ops = &sh_eth_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
/* debug message level */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
};
-#define DEFAULT_RMCR_VALUE 0x00000000
/* ECMR */
enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
unsigned long fdr_value;
unsigned long fcftr_value;
unsigned long rpadir_value;
- unsigned long rmcr_value;
/* interrupt checking mask */
unsigned long tx_check;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0415fa50eeb7..c0981ae45874 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
void sxgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+ netdev->ethtool_ops = &sxgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a983869f..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
* @rx_rsize: ring size
* Description: this function frees the DMA RX descriptors
*/
-void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
- int rx_rsize)
+static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+ int rx_rsize)
{
dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
* @tx_rsize: ring size
* Description: this function frees the DMA TX descriptors
*/
-void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
- int tx_rsize)
+static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+ int tx_rsize)
{
dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
return 0;
}
-
/* Prepare first Tx descriptor for doing TSO operation */
-void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
- struct sxgbe_tx_norm_desc *first_desc,
- struct sk_buff *skb)
+static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+ struct sxgbe_tx_norm_desc *first_desc,
+ struct sk_buff *skb)
{
unsigned int total_hdr_len, tcp_hdr_len;
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
readl(ioaddr + SXGBE_HASH_LOW));
}
-/**
- * sxgbe_config - entry point for changing configuration mode passed on by
- * ifconfig
- * @dev : pointer to the device structure
- * @map : pointer to the device mapping structure
- * Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever some device configuration is changed.
- * Return value:
- * This function returns 0 if success and appropriate error otherwise.
- */
-static int sxgbe_config(struct net_device *dev, struct ifmap *map)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- /* Can't act on a running interface */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != (unsigned long)priv->ioaddr) {
- netdev_warn(dev, "can't change I/O address\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != priv->irq) {
- netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
.ndo_do_ioctl = sxgbe_ioctl,
- .ndo_set_config = sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 56f8bf5a3f1b..81437d91df99 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -188,7 +188,6 @@
/* L3/L4 function registers */
#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
-#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
#define SXGBE_CORE_L34_DATA_REG 0x0C04
/* ARP registers */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 63d595fd3cc5..1e274045970f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
} else {
net_dev->netdev_ops = &efx_farch_netdev_ops;
}
- SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+ net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 0de8b07c24c2..74739c4b9997 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
return 0;
}
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const u32 *indir)
+static int efx_ethtool_set_rxfh(struct net_device *net_dev,
+ const u32 *indir, const u8 *key)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
- .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
- .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+ .get_rxfh = efx_ethtool_get_rxfh,
+ .set_rxfh = efx_ethtool_set_rxfh,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4d3f119b67b3..afb94aa2c15e 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,10 +66,17 @@
#define EFX_USE_QWORD_IO 1
#endif
+/* Hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. It's not strictly necessary to restrict this to
+ * the x86_64 arch, but it is done for safety, since unusual write-combining behaviour
+ * can break PIO.
+ */
+#ifdef CONFIG_X86_64
/* PIO is a win only if write-combining is possible */
#ifdef ARCH_HAS_IOREMAP_WC
#define EFX_USE_PIO 1
#endif
+#endif
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9a9205e77896..43d2e64546ed 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = 0;
+ ivi->min_tx_rate = 0;
tci = ntohs(vf->addr.tci);
ivi->vlan = tci & VLAN_VID_MASK;
ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa9475300411..ede8dcca0ff3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
u8 buf[L1_CACHE_BYTES];
};
+/* Copy in explicit 64-bit writes. */
+static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
+{
+ u64 *src64 = src;
+ u64 __iomem *dest64 = dest;
+ size_t l64 = len / 8;
+ size_t i;
+
+ for (i = 0; i < l64; i++)
+ writeq(src64[i], &dest64[i]);
+}
+
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
* Advances piobuf pointer. Leaves additional data in the copy buffer.
*/
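efx_memcpy_64() exists because memcpy_toio() makes no promise about access width; on a write-combining PIO mapping it may issue sub-64-bit stores, which this erratum forbids, while an explicit writeq() loop pins every store to 8 aligned bytes. A hedged illustration of the invariant (the alignment check is illustrative, not part of the driver):

static void demo_copy64(void __iomem *dest, const void *src, size_t len)
{
	const u64 *s = src;
	u64 __iomem *d = dest;
	size_t i;

	/* callers must hand in 8-byte aligned, 8-byte multiple copies */
	WARN_ON(!IS_ALIGNED((unsigned long)dest, 8) || (len & 7));

	for (i = 0; i < len / 8; i++)
		writeq(s[i], &d[i]);
}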
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
{
int block_len = len & ~(sizeof(copy_buf->buf) - 1);
- memcpy_toio(*piobuf, data, block_len);
+ efx_memcpy_64(*piobuf, data, block_len);
*piobuf += block_len;
len -= block_len;
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
if (copy_buf->used < sizeof(copy_buf->buf))
return;
- memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
*piobuf += sizeof(copy_buf->buf);
data += copy_to_buf;
len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
{
/* if there's anything in it, write the whole buffer, including junk */
if (copy_buf->used)
- memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
}
/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
*/
BUILD_BUG_ON(L1_CACHE_BYTES >
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- memcpy_toio(tx_queue->piobuf, skb->data,
- ALIGN(skb->len, L1_CACHE_BYTES));
+ efx_memcpy_64(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
}
EFX_POPULATE_QWORD_5(buffer->option,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index acbbe48a519c..a86339903b9b 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
dev->netdev_ops = &sis190_netdev_ops;
- SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+ dev->ethtool_ops = &sis190_ethtool_ops;
dev->watchdog_timeo = SIS190_TX_TIMEOUT;
spin_lock_init(&tp->lock);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index c7a4868571f9..6b33127ab352 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
/* The SMC91c92-specific entries in the device structure. */
dev->netdev_ops = &smc_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
smc->mii_if.dev = dev;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
goto out_disable_resources;
}
+ netif_carrier_off(dev);
+
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5f9cb85c8ef..c62e67f3c2f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
return -EBUSY;
}
cmd->transceiver = XCVR_INTERNAL;
- spin_lock_irq(&priv->lock);
rc = phy_ethtool_gset(phy, cmd);
- spin_unlock_irq(&priv->lock);
return rc;
}
@@ -431,8 +429,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (priv->pcs) /* FIXME */
return;
- spin_lock(&priv->lock);
-
pause->rx_pause = 0;
pause->tx_pause = 0;
pause->autoneg = priv->phydev->autoneg;
@@ -442,7 +438,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (priv->flow_ctrl & FLOW_TX)
pause->tx_pause = 1;
- spin_unlock(&priv->lock);
}
static int
@@ -457,8 +452,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (priv->pcs) /* FIXME */
return -EOPNOTSUPP;
- spin_lock(&priv->lock);
-
if (pause->rx_pause)
new_pause |= FLOW_RX;
if (pause->tx_pause)
@@ -473,7 +466,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
} else
priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
- spin_unlock(&priv->lock);
return ret;
}
@@ -784,5 +776,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+ netdev->ethtool_ops = &stmmac_ethtool_ops;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f4841d2e8dc..057a1208e594 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1753,7 +1753,7 @@ static int stmmac_open(struct net_device *dev)
}
/* Request the IRQ lines */
- if (priv->lpi_irq != -ENXIO) {
+ if (priv->lpi_irq > 0) {
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
@@ -1813,7 +1813,7 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
- if (priv->lpi_irq != -ENXIO)
+ if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
@@ -2212,27 +2212,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
stmmac_tx_err(priv);
}
-/* Configuration changes (passed on by ifconfig) */
-static int stmmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr) {
- pr_warn("%s: can't change I/O address\n", dev->name);
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != dev->irq) {
- pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
/**
* stmmac_set_rx_mode - entry point for multicast addressing
* @dev : pointer to the device structure
@@ -2598,7 +2577,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
- .ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb107823..a5b1e1b776fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
if (new_bus == NULL)
return -ENOMEM;
- if (mdio_bus_data->irqs)
+ if (mdio_bus_data->irqs) {
irqlist = mdio_bus_data->irqs;
- else
+ } else {
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++)
+ priv->mii_irq[addr] = PHY_POLL;
irqlist = priv->mii_irq;
+ }
#ifdef CONFIG_OF
if (priv->device->of_node)
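The new loop matters because a zero-filled irq table is ambiguous: 0 looks like a plausible IRQ number rather than "no interrupt". PHY_POLL (-1) is the sentinel that tells phylib to poll link state instead of waiting for a PHY interrupt. Reduced to its core (sketch):

#include <linux/phy.h>

static void demo_init_phy_irq_table(int *irq_table)
{
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++)
		irq_table[addr] = PHY_POLL;	/* poll; no IRQ wired up */
}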
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 46aef5108bea..ea7a65be1f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -237,10 +237,12 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
/* Get the MAC information */
priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
- if (priv->dev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- return -ENXIO;
+ if (priv->dev->irq < 0) {
+ if (priv->dev->irq != -EPROBE_DEFER) {
+ netdev_err(priv->dev,
+ "MAC IRQ configuration information not found\n");
+ }
+ return priv->dev->irq;
}
/*
@@ -252,10 +254,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* so the driver will continue to use the mac irq (ndev->irq)
*/
priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
+ if (priv->wol_irq < 0) {
+ if (priv->wol_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
priv->wol_irq = priv->dev->irq;
+ }
priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (priv->lpi_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
platform_set_drvdata(pdev, priv->dev);
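All three IRQ lookups now propagate the raw negative errno from platform_get_irq_byname(), most importantly -EPROBE_DEFER, so the probe can be retried once the interrupt parent has registered; previously only -ENXIO was handled, and only for macirq. The resulting idiom, sketched:

	int irq = platform_get_irq_byname(pdev, "macirq");

	if (irq < 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "macirq not found\n");
		return irq;	/* -EPROBE_DEFER quietly requeues the probe */
	}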
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 2ead87759ab4..38da73a2a886 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
.get_ethtool_stats = bdx_get_ethtool_stats,
};
- SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
+ netdev->ethtool_ops = &bdx_ethtool_ops;
}
/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f369437..7399a52f7c26 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
static struct mii_bus *cpmac_mii;
-static int cpmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr)
- return -EOPNOTSUPP;
-
- /* ignore other fields */
- return 0;
-}
-
static void cpmac_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
.ndo_do_ioctl = cpmac_ioctl,
- .ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 148da9ae8366..aa8bf45e53dc 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -29,6 +29,8 @@
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define GMII_SEL_MODE_MASK 0x3
+
struct cpsw_phy_sel_priv {
struct device *dev;
u32 __iomem *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
break;
};
- mask = 0x3 << (slave * 2) | BIT(slave + 6);
+ mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
mode <<= slave * 2;
if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
writel(reg, priv->gmii_sel);
}
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
+ switch (slave) {
+ case 0:
+ mask = GMII_SEL_MODE_MASK;
+ break;
+ case 1:
+ mask = GMII_SEL_MODE_MASK << 4;
+ mode <<= 4;
+ break;
+ default:
+ dev_err(priv->dev, "invalid slave number...\n");
+ return;
+ }
+
+ if (priv->rmii_clock_external)
+ dev_err(priv->dev, "RMII External clock is not supported\n");
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
static struct platform_driver cpsw_phy_sel_driver;
static int match(struct device *dev, void *data)
{
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
.compatible = "ti,am3352-cpsw-phy-sel",
.data = &cpsw_gmii_sel_am3352,
},
+ {
+ .compatible = "ti,dra7xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_dra7xx,
+ },
+ {
+ .compatible = "ti,am43xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ priv->dev = &pdev->dev;
priv->cpsw_phy_sel = of_id->data;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7ebc812..b988d16cd34e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -143,13 +143,13 @@ do { \
u32 i; \
for (i = 0; i < priv->num_irqs; i++) \
enable_irq(priv->irqs_table[i]); \
- } while (0);
+ } while (0)
#define cpsw_disable_irq(priv) \
do { \
u32 i; \
for (i = 0; i < priv->num_irqs; i++) \
disable_irq_nosync(priv->irqs_table[i]); \
- } while (0);
+ } while (0)
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
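Dropping the trailing semicolon after while (0) in the two macros above is a correctness fix, not churn: with the semicolon, the macro expands to two statements, and the standard if/else idiom stops compiling. A minimal reproduction (hypothetical helpers):

#define enable_all()	do { enable_irq(1); } while (0);	/* bad: note the ';' */

	if (ok)
		enable_all();	/* expands to: do { ... } while (0); ; */
	else			/* error: 'else' without a previous 'if' */
		report_failure();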
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
+#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
-#define CTRL_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
- TS_ANNEX_D_EN | TS_LTYPE1_EN)
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
+
+#define CTRL_V3_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN)
-#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
-#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1201,7 +1212,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
for_each_slave(priv, cpsw_slave_open, priv);
/* Add default VLAN */
- cpsw_add_default_vlan(priv);
+ if (!priv->data.dual_emac)
+ cpsw_add_default_vlan(priv);
+ else
+ cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
+ ALE_ALL_PORTS << priv->host_port,
+ ALE_ALL_PORTS << priv->host_port, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
/* setup tx dma to fixed prio and zero offset */
@@ -1376,13 +1392,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
- ctrl &= ~CTRL_ALL_TS_MASK;
+ switch (priv->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
- ctrl |= CTRL_TX_TS_BITS;
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V2_TX_TS_BITS;
- if (priv->cpts->rx_enable)
- ctrl |= CTRL_RX_TS_BITS;
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1398,7 +1428,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1474,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
cpsw_hwtstamp_v2(priv);
break;
default:
@@ -1459,7 +1491,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
cfg.flags = 0;
@@ -1780,25 +1813,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return -EINVAL;
if (of_property_read_u32(node, "slaves", &prop)) {
- pr_err("Missing slaves property in the DT.\n");
+ dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
return -EINVAL;
}
data->slaves = prop;
if (of_property_read_u32(node, "active_slave", &prop)) {
- pr_err("Missing active_slave property in the DT.\n");
+ dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
return -EINVAL;
}
data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
- pr_err("Missing cpts_clock_mult property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
return -EINVAL;
}
data->cpts_clock_mult = prop;
if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
- pr_err("Missing cpts_clock_shift property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
return -EINVAL;
}
data->cpts_clock_shift = prop;
@@ -1810,31 +1843,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return -ENOMEM;
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
- pr_err("Missing cpdma_channels property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
- pr_err("Missing ale_entries property in the DT.\n");
+ dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
return -EINVAL;
}
data->ale_entries = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
- pr_err("Missing bd_ram_size property in the DT.\n");
+ dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "rx_descs", &prop)) {
- pr_err("Missing rx_descs property in the DT.\n");
+ dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
return -EINVAL;
}
data->rx_descs = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
- pr_err("Missing mac_control property in the DT.\n");
+ dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
return -EINVAL;
}
data->mac_control = prop;
@@ -1848,7 +1881,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
/* We do not want to force this, as some devices may not have any child node */
if (ret)
- pr_warn("Doesn't have any child node\n");
+ dev_warn(&pdev->dev, "Doesn't have any child node\n");
for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,7 +1898,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
- pr_err("Missing slave[%d] phy_id property\n", i);
+ dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
return -EINVAL;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
@@ -1885,18 +1918,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
- pr_err("Missing or malformed slave[%d] phy-mode property\n",
- i);
+ dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+ i);
return slave_data->phy_if;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
- pr_err("Missing dual_emac_res_vlan in DT.\n");
+ dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
- pr_err("Using %d as Reserved VLAN for %d slave\n",
- slave_data->dual_emac_res_vlan, i);
+ dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
} else {
slave_data->dual_emac_res_vlan = prop;
}
@@ -1920,7 +1953,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
- pr_err("cpsw: error allocating net_device\n");
+ dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
}
@@ -1936,10 +1969,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
ETH_ALEN);
- pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
} else {
random_ether_addr(priv_sl2->mac_addr);
- pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
}
memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
@@ -1970,14 +2003,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
if (ret) {
- pr_err("cpsw: error registering net device\n");
+ dev_err(&pdev->dev, "cpsw: error registering net device\n");
free_netdev(ndev);
ret = -ENODEV;
}
@@ -1999,7 +2032,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
- pr_err("error allocating net_device\n");
+ dev_err(&pdev->dev, "error allocating net_device\n");
return -ENOMEM;
}
@@ -2014,7 +2047,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
if (!priv->cpts) {
- pr_err("error allocating cpts\n");
+ dev_err(&pdev->dev, "error allocating cpts\n");
goto clean_ndev_ret;
}
@@ -2027,7 +2060,7 @@ static int cpsw_probe(struct platform_device *pdev)
pinctrl_pm_select_default_state(&pdev->dev);
if (cpsw_probe_dt(&priv->data, pdev)) {
- pr_err("cpsw: platform data missing\n");
+ dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
@@ -2035,10 +2068,10 @@ static int cpsw_probe(struct platform_device *pdev)
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
- pr_info("Detected MACID = %pM\n", priv->mac_addr);
+ dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
} else {
eth_random_addr(priv->mac_addr);
- pr_info("Random MACID = %pM\n", priv->mac_addr);
+ dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
}
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2199,7 +2232,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 243513980b51..6b56f85951e5 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
}
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
{
- cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+ cpts->refclk = devm_clk_get(dev, "cpts");
if (IS_ERR(cpts->refclk)) {
- pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+ dev_err(dev, "Failed to get cpts refclk\n");
cpts->refclk = NULL;
return;
}
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
static void cpts_clk_release(struct cpts *cpts)
{
clk_disable(cpts->refclk);
- clk_put(cpts->refclk);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- cpts_clk_init(cpts);
+ cpts_clk_init(dev, cpts);
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 88ef27067bf2..4a000f6dd6fc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
int bitmap_size;
struct cpdma_desc_pool *pool;
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
- return NULL;
+ goto fail;
spin_lock_init(&pool->lock);
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
pool->num_desc = size / pool->desc_size;
bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
- pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
if (!pool->bitmap)
goto fail;
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
if (pool->iomap)
return pool;
-
fail:
- kfree(pool->bitmap);
- kfree(pool);
return NULL;
}
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
spin_lock_irqsave(&pool->lock, flags);
WARN_ON(pool->used_desc);
- kfree(pool->bitmap);
if (pool->cpumap) {
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
iounmap(pool->iomap);
}
spin_unlock_irqrestore(&pool->lock, flags);
- kfree(pool);
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
struct cpdma_ctlr *ctlr;
- ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
if (!ctlr)
return NULL;
@@ -290,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->params.desc_hw_addr,
ctlr->params.desc_mem_size,
ctlr->params.desc_align);
- if (!ctlr->pool) {
- kfree(ctlr);
+ if (!ctlr->pool)
return NULL;
- }
if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -468,7 +461,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
- kfree(ctlr);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +499,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
{
struct cpdma_chan *chan;
- int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
unsigned long flags;
if (__chan_linear(chan_num) >= ctlr->num_chan)
return NULL;
- ret = -ENOMEM;
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
- goto err_chan_alloc;
+ return ERR_PTR(-ENOMEM);
spin_lock_irqsave(&ctlr->lock, flags);
- ret = -EBUSY;
- if (ctlr->channels[chan_num])
- goto err_chan_busy;
+ if (ctlr->channels[chan_num]) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ devm_kfree(ctlr->dev, chan);
+ return ERR_PTR(-EBUSY);
+ }
chan->ctlr = ctlr;
chan->state = CPDMA_STATE_IDLE;
@@ -551,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
ctlr->channels[chan_num] = chan;
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
-
-err_chan_busy:
- spin_unlock_irqrestore(&ctlr->lock, flags);
- kfree(chan);
-err_chan_alloc:
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
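cpdma_chan_create() keeps returning ERR_PTR(-ENOMEM) / ERR_PTR(-EBUSY) while the devm conversion removes the goto unwinding; the one manual devm_kfree() covers the case where the channel slot was taken between allocation and locking. Callers stay on the usual convention (sketch):

	struct cpdma_chan *chan;

	chan = cpdma_chan_create(ctlr, chan_num, handler);
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENOMEM or -EBUSY from above */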
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8f0e69ce07ca..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
res_num))) {
for (irq_num = res->start; irq_num <= res->end; irq_num++) {
- dev_err(emac_dev, "Request IRQ %d\n", irq_num);
if (request_irq(irq_num, emac_irq, 0, ndev->name,
ndev)) {
dev_err(emac_dev,
@@ -1865,7 +1864,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
struct emac_priv *priv;
unsigned long hw_ram_addr;
struct emac_platform_data *pdata;
- struct device *emac_dev;
struct cpdma_params dma_params;
struct clk *emac_clk;
unsigned long emac_bus_frequency;
@@ -1911,7 +1909,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->coal_intvl = 0;
priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
- emac_dev = &ndev->dev;
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
memset(&dma_params, 0, sizeof(dma_params));
- dma_params.dev = emac_dev;
+ dma_params.dev = &pdev->dev;
dma_params.dmaregs = priv->emac_base;
dma_params.rxthresh = priv->emac_base + 0x120;
dma_params.rxfree = priv->emac_base + 0x140;
@@ -1980,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+ ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
/* register the network device */
@@ -1994,7 +1991,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (netif_msg_probe(priv)) {
- dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+ dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9dec5d82..735dc53d4b01 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return -EINVAL;
if (of_property_read_u32(node, "bus_freq", &prop)) {
- pr_err("Missing bus_freq property in the DT.\n");
+ dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
return -EINVAL;
}
data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct phy_device *phy;
int ret, addr;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->bus = mdiobus_alloc();
+ data->bus = devm_mdiobus_alloc(dev);
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
- ret = -ENOMEM;
- goto bail_out;
+ return -ENOMEM;
}
if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- /* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- data->clk = clk_get(&pdev->dev, "fck");
+ data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "could not find register map resource\n");
- ret = -ENOENT;
- goto bail_out;
- }
-
- res = devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev));
- if (!res) {
- dev_err(dev, "could not allocate register map resource\n");
- ret = -ENXIO;
- goto bail_out;
- }
-
- data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!data->regs) {
- dev_err(dev, "could not map mdio registers\n");
- ret = -ENOMEM;
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ ret = PTR_ERR(data->regs);
goto bail_out;
}
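devm_ioremap_resource() folds the removed three-step dance (NULL-check the resource, devm_request_mem_region(), ioremap) into one call that also prints its own diagnostics, which is why the dev_err() strings disappear along with the code. The canonical shape:

	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL too */
	if (IS_ERR(regs))
		return PTR_ERR(regs);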
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- if (data->bus)
- mdiobus_free(data->bus);
-
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return ret;
}
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus) {
+ if (data->bus)
mdiobus_unregister(data->bus);
- mdiobus_free(data->bus);
- }
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return 0;
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 449011b0e007..4c70360967c2 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2191,8 +2191,6 @@ static void tile_net_setup(struct net_device *dev)
static void tile_net_dev_init(const char *name, const uint8_t *mac)
{
int ret;
- int i;
- int nz_addr = 0;
struct net_device *dev;
struct tile_net_priv *priv;
@@ -2212,7 +2210,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
/* Initialize "priv". */
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
priv->dev = dev;
priv->channel = -1;
priv->loopify_channel = -1;
@@ -2223,15 +2220,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
* be done before the device is opened. If the MAC is all zeroes,
* we use a random address, since we're probably on the simulator.
*/
- for (i = 0; i < 6; i++)
- nz_addr |= mac[i];
-
- if (nz_addr) {
- memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_len = 6;
- } else {
+ if (!is_zero_ether_addr(mac))
+ ether_addr_copy(dev->dev_addr, mac);
+ else
eth_hw_addr_random(dev);
- }
/* Register the network device. */
ret = register_netdev(dev);
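The tilegx cleanup replaces the open-coded zero test and memcpy with the <linux/etherdevice.h> helpers; note that ether_addr_copy() assumes 2-byte aligned buffers, which net_device address storage satisfies. In isolation (sketch):

#include <linux/etherdevice.h>

static void demo_assign_mac(struct net_device *dev, const u8 *mac)
{
	if (!is_zero_ether_addr(mac))
		ether_addr_copy(dev->dev_addr, mac);	/* copies ETH_ALEN bytes */
	else
		eth_hw_addr_random(dev);	/* random, locally administered */
}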
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d899d0072ae0..bb7992804664 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1561,7 +1561,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
* alloc netdev
*/
*netdev = alloc_etherdev(sizeof(struct gelic_port));
- if (!netdev) {
+ if (!*netdev) {
kfree(card->unalign);
return NULL;
}
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2b4474..f66ddaee0c87 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on PCI
+ depends on (PCI || USE_OF)
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b72bb2..2d72f96a9e2c 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
static const char version[] =
"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
-/* This driver was written to use PCI memory space. Some early versions
- of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
rqRhineI = 0x0100, /* See comment below */
+ rqIntPHY = 0x0200, /* Integrated PHY */
+ rqMgmt = 0x0400, /* Management adapter */
+ rqNeedEnMMIO = 0x0800, /* Whether the core needs to be
+ * switched from PIO mode to MMIO
+ * (only applies to PCI)
+ */
};
/*
* rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+ { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
/* Offsets to the device registers. */
enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
BCR1_MED1=0x80, /* for VT6102 */
};
-#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
0
};
-#endif
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
unsigned char *tx_bufs;
dma_addr_t tx_bufs_dma;
- struct pci_dev *pdev;
+ int irq;
long pioaddr;
struct net_device *dev;
struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
"failed" : "succeeded");
}
-#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
int n;
- if (quirks & rqRhineI) {
- /* More recent docs say that this bit is reserved ... */
- n = inb(pioaddr + ConfigA) | 0x20;
- outb(n, pioaddr + ConfigA);
- } else {
- n = inb(pioaddr + ConfigD) | 0x80;
- outb(n, pioaddr + ConfigD);
+
+ if (quirks & rqNeedEnMMIO) {
+ if (quirks & rqRhineI) {
+ /* More recent docs say that this bit is reserved */
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
+ } else {
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
+ }
}
}
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+ long pioaddr,
+ void __iomem *ioaddr,
+ u32 quirks)
+{
+ if (quirks & rqNeedEnMMIO) {
+ int i = 0;
+
+ /* Check that selected MMIO registers match the PIO ones */
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(pioaddr+reg);
+ unsigned char b = readb(ioaddr+reg);
+
+ if (a != b) {
+ dev_err(hwdev,
+ "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
/*
* Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
if (i > 512)
pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
-#ifdef USE_MMIO
/*
* Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
* MMIO. If reloading EEPROM was done first this could be avoided, but
* it is not known if that still works with the "win98-reboot" problem.
*/
enable_mmio(pioaddr, rp->quirks);
-#endif
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
static void rhine_poll(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- const int irq = rp->pdev->irq;
+ const int irq = rp->irq;
disable_irq(irq);
rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
msleep(5);
/* Reload EEPROM controlled bytes cleared by soft reset */
- rhine_reload_eeprom(pioaddr, dev);
+ if (dev_is_pci(dev->dev.parent))
+ rhine_reload_eeprom(pioaddr, dev);
}
static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
#endif
};
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+ long pioaddr, void __iomem *ioaddr, int irq)
{
struct net_device *dev;
struct rhine_private *rp;
- int i, rc;
- u32 quirks;
- long pioaddr;
- long memaddr;
- void __iomem *ioaddr;
- int io_size, phy_id;
+ int i, rc, phy_id;
const char *name;
-#ifdef USE_MMIO
- int bar = 1;
-#else
- int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- pr_info_once("%s\n", version);
-#endif
-
- io_size = 256;
- phy_id = 0;
- quirks = 0;
- name = "Rhine";
- if (pdev->revision < VTunknown0) {
- quirks = rqRhineI;
- io_size = 128;
- }
- else if (pdev->revision >= VT6102) {
- quirks = rqWOL | rqForceReset;
- if (pdev->revision < VT6105) {
- name = "Rhine II";
- quirks |= rqStatusWBRace; /* Rhine-II exclusive */
- }
- else {
- phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
- if (pdev->revision >= VT6105_B0)
- quirks |= rq6patterns;
- if (pdev->revision < VT6105M)
- name = "Rhine III";
- else
- name = "Rhine III (Management Adapter)";
- }
- }
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_out;
/* this should always be supported */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
if (rc) {
- dev_err(&pdev->dev,
- "32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out_pci_disable;
- }
-
- /* sanity check */
- if ((pci_resource_len(pdev, 0) < io_size) ||
- (pci_resource_len(pdev, 1) < io_size)) {
- rc = -EIO;
- dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out_pci_disable;
+ dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+ goto err_out;
}
- pioaddr = pci_resource_start(pdev, 0);
- memaddr = pci_resource_start(pdev, 1);
-
- pci_set_master(pdev);
-
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out_pci_disable;
+ goto err_out;
}
- SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_NETDEV_DEV(dev, hwdev);
rp = netdev_priv(dev);
rp->dev = dev;
rp->quirks = quirks;
rp->pioaddr = pioaddr;
- rp->pdev = pdev;
+ rp->base = ioaddr;
+ rp->irq = irq;
rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out_free_netdev;
-
- ioaddr = pci_iomap(pdev, bar, io_size);
- if (!ioaddr) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
- pci_name(pdev), io_size, memaddr);
- goto err_out_free_res;
- }
-
-#ifdef USE_MMIO
- enable_mmio(pioaddr, quirks);
-
- /* Check that selected MMIO registers match the PIO ones */
- i = 0;
- while (mmio_verify_registers[i]) {
- int reg = mmio_verify_registers[i++];
- unsigned char a = inb(pioaddr+reg);
- unsigned char b = readb(ioaddr+reg);
- if (a != b) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "MMIO do not match PIO [%02x] (%02x != %02x)\n",
- reg, a, b);
- goto err_out_unmap;
- }
- }
-#endif /* USE_MMIO */
-
- rp->base = ioaddr;
+ phy_id = rp->quirks & rqIntPHY ? 1 : 0;
u64_stats_init(&rp->tx_stats.syncp);
u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
- if (pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
- goto err_out_unmap;
+ goto err_out_free_netdev;
+
+ if (rp->quirks & rqRhineI)
+ name = "Rhine";
+ else if (rp->quirks & rqStatusWBRace)
+ name = "Rhine II";
+ else if (rp->quirks & rqMgmt)
+ name = "Rhine III (Management Adapter)";
+ else
+ name = "Rhine III";
netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name,
-#ifdef USE_MMIO
- memaddr,
-#else
- (long)ioaddr,
-#endif
- dev->dev_addr, pdev->irq);
+ name, (long)ioaddr, dev->dev_addr, rp->irq);
- pci_set_drvdata(pdev, dev);
+ dev_set_drvdata(hwdev, dev);
{
u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_free_netdev:
+ free_netdev(dev);
+err_out:
+ return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *hwdev = &pdev->dev;
+ int rc;
+ long pioaddr, memaddr;
+ void __iomem *ioaddr;
+ int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ * in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+ u32 quirks = rqNeedEnMMIO;
+#else
+ u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ pr_info_once("%s\n", version);
+#endif
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ if (pdev->revision < VTunknown0) {
+ quirks |= rqRhineI;
+ } else if (pdev->revision >= VT6102) {
+ quirks |= rqWOL | rqForceReset;
+ if (pdev->revision < VT6105) {
+ quirks |= rqStatusWBRace;
+ } else {
+ quirks |= rqIntPHY;
+ if (pdev->revision >= VT6105_B0)
+ quirks |= rq6patterns;
+ if (pdev->revision >= VT6105M)
+ quirks |= rqMgmt;
+ }
+ }
+
+ /* sanity check */
+ if ((pci_resource_len(pdev, 0) < io_size) ||
+ (pci_resource_len(pdev, 1) < io_size)) {
+ rc = -EIO;
+ dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+ goto err_out_pci_disable;
+ }
+
+ pioaddr = pci_resource_start(pdev, 0);
+ memaddr = pci_resource_start(pdev, 1);
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_pci_disable;
+
+ ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+ if (!ioaddr) {
+ rc = -EIO;
+ dev_err(hwdev,
+ "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ dev_name(hwdev), io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+ enable_mmio(pioaddr, quirks);
+
+ rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+ if (rc)
+ goto err_out_unmap;
+
+ rc = rhine_init_one_common(&pdev->dev, quirks,
+ pioaddr, ioaddr, pdev->irq);
+ if (!rc)
+ return 0;
+
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
-err_out_free_netdev:
- free_netdev(dev);
err_out_pci_disable:
pci_disable_device(pdev);
err_out:
return rc;
}
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const u32 *quirks;
+ int irq;
+ struct resource *res;
+ void __iomem *ioaddr;
+
+ match = of_match_device(rhine_of_tbl, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ return -EINVAL;
+
+ quirks = match->data;
+ if (!quirks)
+ return -EINVAL;
+
+ return rhine_init_one_common(&pdev->dev, *quirks,
+ (long)ioaddr, ioaddr, irq);
+}
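
The platform probe above binds only through the device tree: it expects a single memory resource covering the register block plus one interrupt, and takes its quirks from the OF match data. A node of roughly this shape would match the "via,vt8500-rhine" entry (the address and interrupt number here are illustrative, not taken from this patch):

	ethernet@d8004000 {
		compatible = "via,vt8500-rhine";
		reg = <0xd8004000 0x100>;
		interrupts = <10>;
	};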
+
static int alloc_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void *ring;
dma_addr_t ring_dma;
- ring = pci_alloc_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- &ring_dma);
+ ring = dma_alloc_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma,
+ GFP_ATOMIC);
if (!ring) {
netdev_err(dev, "Could not allocate DMA memory\n");
return -ENOMEM;
}
if (rp->quirks & rqRhineI) {
- rp->tx_bufs = pci_alloc_consistent(rp->pdev,
- PKT_BUF_SZ * TX_RING_SIZE,
- &rp->tx_bufs_dma);
+ rp->tx_bufs = dma_alloc_coherent(hwdev,
+ PKT_BUF_SZ * TX_RING_SIZE,
+ &rp->tx_bufs_dma,
+ GFP_ATOMIC);
if (rp->tx_bufs == NULL) {
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- ring, ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
return -ENOMEM;
}
}
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- rp->rx_ring, rp->rx_ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ rp->rx_ring, rp->rx_ring_dma);
rp->tx_ring = NULL;
if (rp->tx_bufs)
- pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
- rp->tx_bufs, rp->tx_bufs_dma);
+ dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+ rp->tx_bufs, rp->tx_bufs_dma);
rp->tx_bufs = NULL;
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
static void alloc_rbufs(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
dma_addr_t next;
int i;
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
break;
rp->rx_skbuff_dma[i] =
- pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+ dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
rp->rx_skbuff_dma[i] = 0;
dev_kfree_skb(skb);
break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
static void free_rbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
/* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
rp->rx_ring[i].rx_status = 0;
rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->rx_skbuff[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[i],
- rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ rp->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
static void free_tbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->tx_skbuff[i]) {
if (rp->tx_skbuff_dma[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[i],
rp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
@@ -1278,8 +1349,9 @@ static void rhine_set_carrier(struct mii_if_info *mii)
/* autoneg is off: Link is always assumed to be up */
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
- } else /* Let MMI library update carrier status */
- rhine_check_media(dev, 0);
+ }
+
+ rhine_check_media(dev, 0);
netif_info(rp, link, dev, "force_media %d, carrier %d\n",
mii->force_media, netif_carrier_ok(dev));
@@ -1469,7 +1541,7 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
- if (rp->pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
napi_enable(&rp->napi);
@@ -1581,16 +1653,15 @@ static int rhine_open(struct net_device *dev)
void __iomem *ioaddr = rp->base;
int rc;
- rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
- dev);
+ rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+ netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
if (rc) {
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
return rc;
}
alloc_rbufs(dev);
@@ -1659,6 +1730,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void __iomem *ioaddr = rp->base;
unsigned entry;
@@ -1695,9 +1767,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_bufs));
} else {
rp->tx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+ dma_map_single(hwdev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
@@ -1788,6 +1860,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
/* find and cleanup dirty tx descriptors */
@@ -1831,10 +1904,10 @@ static void rhine_tx(struct net_device *dev)
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[entry],
rp->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1936,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int count;
int entry = rp->cur_rx % RX_RING_SIZE;
@@ -1924,19 +1998,19 @@ static int rhine_rx(struct net_device *dev, int limit)
if (pkt_len < rx_copybreak)
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
if (skb) {
- pci_dma_sync_single_for_cpu(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
skb = rp->rx_skbuff[entry];
if (skb == NULL) {
@@ -1945,10 +2019,10 @@ static int rhine_rx(struct net_device *dev, int limit)
}
rp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (unlikely(desc_length & DescTag))
@@ -1979,10 +2053,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (skb == NULL)
break; /* Better luck next round. */
rp->rx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data,
+ dma_map_single(hwdev, skb->data,
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev,
+ rp->rx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
rp->rx_skbuff_dma[entry] = 0;
break;
@@ -2103,7 +2178,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if (rp->pdev->revision >= VT6105M) {
+ } else if (rp->quirks & rqMgmt) {
int i = 0;
u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2200,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
}
/* enable/disable VLAN receive filtering */
- if (rp->pdev->revision >= VT6105M) {
+ if (rp->quirks & rqMgmt) {
if (dev->flags & IFF_PROMISC)
BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
else
@@ -2136,11 +2211,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2352,7 @@ static int rhine_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
iowrite16(CmdStop, ioaddr + ChipCmd);
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
@@ -2286,7 +2361,7 @@ static int rhine_close(struct net_device *dev)
}
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2375,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ iounmap(rp->base);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2443,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
@@ -2367,23 +2455,21 @@ static int rhine_suspend(struct device *device)
netif_device_detach(dev);
- rhine_shutdown(pdev);
+ if (dev_is_pci(device))
+ rhine_shutdown_pci(to_pci_dev(device));
return 0;
}
static int rhine_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
return 0;
-#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
-#endif
rhine_power_init(dev);
free_tbufs(dev);
free_rbufs(dev);
@@ -2408,15 +2494,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#endif /* !CONFIG_PM_SLEEP */
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
- .probe = rhine_init_one,
- .remove = rhine_remove_one,
- .shutdown = rhine_shutdown,
+ .probe = rhine_init_one_pci,
+ .remove = rhine_remove_one_pci,
+ .shutdown = rhine_shutdown_pci,
.driver.pm = RHINE_PM_OPS,
};
+static struct platform_driver rhine_driver_platform = {
+ .probe = rhine_init_one_platform,
+ .remove = rhine_remove_one_platform,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rhine_of_tbl,
+ .pm = RHINE_PM_OPS,
+ }
+};
+
static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
@@ -2437,6 +2534,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
static int __init rhine_init(void)
{
+ int ret_pci, ret_platform;
+
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
pr_info("%s\n", version);
@@ -2449,13 +2548,19 @@ static int __init rhine_init(void)
else if (avoid_D3)
pr_info("avoid_D3 set\n");
- return pci_register_driver(&rhine_driver);
+ ret_pci = pci_register_driver(&rhine_driver_pci);
+ ret_platform = platform_driver_register(&rhine_driver_platform);
+ if ((ret_pci < 0) && (ret_platform < 0))
+ return ret_pci;
+
+ return 0;
}
static void __exit rhine_cleanup(void)
{
- pci_unregister_driver(&rhine_driver);
+ platform_driver_unregister(&rhine_driver_platform);
+ pci_unregister_driver(&rhine_driver_pci);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fa193c4688da..4ef818a7a6c6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
long end = jiffies + 2;
while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 64b4639f43b6..d4abf478e2bb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
long end = jiffies + 2;
while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
XAE_MDIO_MCR_READY_MASK)) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0d87c67a5ff7..8c4aed3053eb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
*/
while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
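
The three hunks above make the same fix: with a signed "long end" and the unsigned "jiffies" counter, "end - jiffies" is evaluated in unsigned arithmetic, so "<= 0" degenerates to "== 0" and the timeout is missed unless the loop happens to sample jiffies exactly at "end". A minimal sketch of the corrected idiom, with a hypothetical hardware_ready() predicate standing in for the register polls above:

	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static bool hardware_ready(void);	/* hypothetical poll predicate */

	static int wait_ready_example(void)
	{
		unsigned long end = jiffies + 2;	/* ~2 ticks from now */

		while (!hardware_ready()) {
			/*
			 * time_before_eq() compares via a signed difference,
			 * so it stays correct across jiffies wraparound and
			 * when the loop first samples jiffies after 'end'.
			 */
			if (time_before_eq(end, jiffies))
				return -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}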
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d18f711d0b0c..6cc37c15e0bf 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -28,50 +28,119 @@
#include <linux/hyperv.h>
#include <linux/rndis.h>
-/* Fwd declaration */
-struct hv_netvsc_packet;
-struct ndis_tcp_ip_checksum_info;
+/* RSS related */
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 /* query and set */
-/* Represent the xfer page packet which contains 1 or more netvsc packet */
-struct xferpage_packet {
- struct list_head list_ent;
- u32 status;
+#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
+#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
- /* # of netvsc packets this xfer packet contains */
- u32 count;
+#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
+#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
+
+struct ndis_obj_header {
+ u8 type;
+ u8 rev;
+ u16 size;
+} __packed;
+
+/* ndis_recv_scale_cap/cap_flag */
+#define NDIS_RSS_CAPS_MESSAGE_SIGNALED_INTERRUPTS 0x01000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_ISR 0x02000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_DPC 0x04000000
+#define NDIS_RSS_CAPS_USING_MSI_X 0x08000000
+#define NDIS_RSS_CAPS_RSS_AVAILABLE_ON_PORTS 0x10000000
+#define NDIS_RSS_CAPS_SUPPORTS_MSI_X 0x20000000
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV4 0x00000100
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6 0x00000200
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6_EX 0x00000400
+
+struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
+ struct ndis_obj_header hdr;
+ u32 cap_flag;
+ u32 num_int_msg;
+ u32 num_recv_que;
+ u16 num_indirect_tabent;
+} __packed;
+
+
+/* ndis_recv_scale_param flags */
+#define NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED 0x0001
+#define NDIS_RSS_PARAM_FLAG_HASH_INFO_UNCHANGED 0x0002
+#define NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED 0x0004
+#define NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED 0x0008
+#define NDIS_RSS_PARAM_FLAG_DISABLE_RSS 0x0010
+
+/* Hash info bits */
+#define NDIS_HASH_FUNC_TOEPLITZ 0x00000001
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_RSS_INDIRECTION_TABLE_MAX_SIZE_REVISION_2 (128 * 4)
+#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
+
+#define ITAB_NUM 128
+#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
+extern u8 netvsc_hash_key[];
+
+struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
+ struct ndis_obj_header hdr;
+
+ /* Qualifies the rest of the information */
+ u16 flag;
+
+ /* The base CPU number to do receive processing. Not used. */
+ u16 base_cpu_number;
+
+ /* This describes the hash function and type being enabled */
+ u32 hashinfo;
+
+ /* The size of indirection table array */
+ u16 indirect_tabsize;
+
+ /* The offset of the indirection table from the beginning of this
+ * structure
+ */
+ u32 indirect_taboffset;
+
+ /* The size of the hash secret key */
+ u16 hashkey_size;
+
+ /* The offset of the secret key from the beginning of this structure */
+ u32 hashkey_offset;
+
+ u32 processor_masks_offset;
+ u32 num_processor_masks;
+ u32 processor_masks_entry_size;
};
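
Since the indirection table and the hash key live past the fixed struct and are reached through the offset fields, filling one of these objects typically looks like the sketch below (alloc_rss_param() is hypothetical, not part of this header; the caller would fill the indirection table with queue numbers):

	static struct ndis_recv_scale_param *alloc_rss_param(void)
	{
		struct ndis_recv_scale_param *rssp;

		rssp = kzalloc(sizeof(*rssp) + ITAB_NUM * sizeof(u32) +
			       HASH_KEYLEN, GFP_KERNEL);
		if (!rssp)
			return NULL;

		rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
		rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
		rssp->hdr.size = sizeof(*rssp);
		rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
				 NDIS_HASH_TCP_IPV4;

		/* Both variable-length arrays follow the fixed header. */
		rssp->indirect_tabsize = ITAB_NUM * sizeof(u32);
		rssp->indirect_taboffset = sizeof(*rssp);
		rssp->hashkey_size = HASH_KEYLEN;
		rssp->hashkey_offset = rssp->indirect_taboffset +
				       rssp->indirect_tabsize;

		memcpy((u8 *)rssp + rssp->hashkey_offset,
		       netvsc_hash_key, HASH_KEYLEN);
		return rssp;
	}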
+/* Fwd declaration */
+struct ndis_tcp_ip_checksum_info;
+
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
*/
struct hv_netvsc_packet {
/* Bookkeeping stuff */
- struct list_head list_ent;
u32 status;
struct hv_device *device;
bool is_data_pkt;
u16 vlan_tci;
- /*
- * Valid only for receives when we break a xfer page packet
- * into multiple netvsc packets
- */
- struct xferpage_packet *xfer_page_pkt;
+ u16 q_idx;
+ struct vmbus_channel *channel;
- union {
- struct {
- u64 recv_completion_tid;
- void *recv_completion_ctx;
- void (*recv_completion)(void *context);
- } recv;
- struct {
- u64 send_completion_tid;
- void *send_completion_ctx;
- void (*send_completion)(void *context);
- } send;
- } completion;
+ u64 send_completion_tid;
+ void *send_completion_ctx;
+ void (*send_completion)(void *context);
+
+ u32 send_buf_index;
/* This points to the memory after page_buf */
struct rndis_message *rndis_msg;
@@ -120,6 +189,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info);
+void netvsc_channel_cb(void *context);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -514,14 +584,16 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024) /* 1MB */
+#define NETVSC_INVALID_INDEX -1
-#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
-/* Preallocated receive packets */
-#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
+#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_PACKET_SIZE 2048
+#define VRSS_SEND_TAB_SIZE 16
+
/* Per netvsc channel-specific */
struct netvsc_device {
struct hv_device *dev;
@@ -532,12 +604,6 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool start_remove;
bool destroy;
- /*
- * List of free preallocated hv_netvsc_packet to represent receive
- * packet
- */
- struct list_head recv_pkt_list;
- spinlock_t recv_pkt_list_lock;
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
@@ -546,6 +612,15 @@ struct netvsc_device {
u32 recv_section_cnt;
struct nvsp_1_receive_buffer_section *recv_section;
+ /* Send buffer allocated by us */
+ void *send_buf;
+ u32 send_buf_size;
+ u32 send_buf_gpadl_handle;
+ u32 send_section_cnt;
+ u32 send_section_size;
+ unsigned long *send_section_map;
+ int map_words;
+
/* Used for NetVSP initialization protocol */
struct completion channel_init_wait;
struct nvsp_message channel_init_pkt;
@@ -555,10 +630,20 @@ struct netvsc_device {
struct net_device *ndev;
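+ /* Virtual RSS (vRSS) state: per-queue channel lookup, the
+ * host-supplied hash->queue indirection table, and per-queue
+ * counts of in-flight sends.
+ */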
+ struct vmbus_channel *chn_table[NR_CPUS];
+ u32 send_table[VRSS_SEND_TAB_SIZE];
+ u32 num_chn;
+ atomic_t queue_sends[NR_CPUS];
+
/* Holds rndis device info */
void *extension;
- /* The recive buffer for this device */
+
+ int ring_size;
+
+ /* The primary channel callback buffer */
unsigned char cb_buffer[NETVSC_PACKET_SIZE];
+ /* The sub channel callback buffer */
+ unsigned char *sub_cb_buf;
};
/* NdisInitialize message */
@@ -706,6 +791,7 @@ enum ndis_per_pkt_info_type {
IEEE_8021Q_INFO,
ORIGINAL_PKTINFO,
PACKET_CANCEL_ID,
+ NBL_HASH_VALUE = PACKET_CANCEL_ID,
ORIGINAL_NET_BUFLIST,
CACHED_NET_BUFLIST,
SHORT_PKT_PADINFO,
@@ -852,6 +938,9 @@ struct ndis_tcp_lso_info {
#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
sizeof(struct ndis_tcp_lso_info))
+#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(u32))
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f7629ecefa84..4ed38eaecea8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
+#include <asm/sync_bitops.h>
#include "hyperv_net.h"
@@ -80,7 +81,7 @@ get_in_err:
}
-static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
net_device->recv_section = NULL;
}
+ /* Deal with the send buffer we may have set up.
+ * If we got a send section size, it means we received a
+ * SendSendBufferComplete msg (i.e. we sent a
+ * NvspMessage1TypeSendSendBuffer msg), therefore we need
+ * to send a revoke msg here
+ */
+ if (net_device->send_section_size) {
+ /* Send the revoke send buffer */
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+ revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+
+ ret = vmbus_sendpacket(net_device->dev->channel,
+ revoke_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)revoke_packet,
+ VM_PKT_DATA_INBAND, 0);
+ /* If we failed here, we might as well return and
+ * have a leak rather than continue and risk a bugcheck
+ */
+ if (ret != 0) {
+ netdev_err(ndev, "unable to send "
+ "revoke send buffer to netvsp\n");
+ return ret;
+ }
+ }
+ /* Teardown the gpadl on the vsp end */
+ if (net_device->send_buf_gpadl_handle) {
+ ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ net_device->send_buf_gpadl_handle);
+
+ /* If we failed here, we might as well return and have a leak
+ * rather than continue and risk a bugcheck
+ */
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to teardown send buffer's gpadl\n");
+ return ret;
+ }
+ net_device->send_buf_gpadl_handle = 0;
+ }
+ if (net_device->send_buf) {
+ /* Free up the send buffer */
+ free_pages((unsigned long)net_device->send_buf,
+ get_order(net_device->send_buf_size));
+ net_device->send_buf = NULL;
+ }
+ kfree(net_device->send_section_map);
+
return ret;
}
-static int netvsc_init_recv_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
goto cleanup;
}
+ /* Now set up the send buffer. */
+ net_device->send_buf =
+ (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(net_device->send_buf_size));
+ if (!net_device->send_buf) {
+ netdev_err(ndev, "unable to allocate send "
+ "buffer of size %d\n", net_device->send_buf_size);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Establish the gpadl handle for this buffer on this
+ * channel. Note: This call uses the vmbus connection rather
+ * than the channel to establish the gpadl handle.
+ */
+ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+ net_device->send_buf_size,
+ &net_device->send_buf_gpadl_handle);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to establish send buffer's gpadl\n");
+ goto cleanup;
+ }
+
+ /* Notify the NetVsp of the gpadl handle */
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+ init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+ net_device->send_buf_gpadl_handle;
+ init_packet->msg.v1_msg.send_recv_buf.id = 0;
+
+ /* Send the gpadl notification request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to send send buffer's gpadl to netvsp\n");
+ goto cleanup;
+ }
+
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+ BUG_ON(t == 0);
+
+ /* Check the response */
+ if (init_packet->msg.v1_msg.
+ send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
+ netdev_err(ndev, "Unable to complete send buffer "
+ "initialization with NetVsp - status %d\n",
+ init_packet->msg.v1_msg.
+ send_send_buf_complete.status);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Parse the response */
+ net_device->send_section_size = init_packet->msg.
+ v1_msg.send_send_buf_complete.section_size;
+
+ /* Section count is simply the size divided by the section size.
+ */
+ net_device->send_section_cnt =
+ net_device->send_buf_size/net_device->send_section_size;
+
+ dev_info(&device->device, "Send section size: %d, Section count:%d\n",
+ net_device->send_section_size, net_device->send_section_cnt);
+
+ /* Setup state for managing the send buffer. */
+ net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
+ BITS_PER_LONG);
+
+ net_device->send_section_map =
+ kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
+ if (net_device->send_section_map == NULL)
+ goto cleanup;
+
goto exit;
cleanup:
- netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_buf(net_device);
exit:
return ret;
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
else
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+ net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
- ret = netvsc_init_recv_buf(device);
+ ret = netvsc_init_buf(device);
cleanup:
return ret;
@@ -378,7 +512,7 @@ cleanup:
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
- netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_buf(net_device);
}
/*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
int netvsc_device_remove(struct hv_device *device)
{
struct netvsc_device *net_device;
- struct hv_netvsc_packet *netvsc_packet, *pos;
unsigned long flags;
net_device = hv_get_drvdata(device);
@@ -416,11 +549,8 @@ int netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- list_for_each_entry_safe(netvsc_packet, pos,
- &net_device->recv_pkt_list, list_ent) {
- list_del(&netvsc_packet->list_ent);
- kfree(netvsc_packet);
- }
+ if (net_device->sub_cb_buf)
+ vfree(net_device->sub_cb_buf);
kfree(net_device);
return 0;
@@ -444,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
+static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
+ u32 index)
+{
+ sync_change_bit(index, net_device->send_section_map);
+}
+
static void netvsc_send_completion(struct netvsc_device *net_device,
struct hv_device *device,
struct vmpacket_descriptor *packet)
@@ -451,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
+ u32 send_index;
ndev = net_device->ndev;
@@ -461,7 +598,9 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
(nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
(nvsp_packet->hdr.msg_type ==
- NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
+ NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
+ (nvsp_packet->hdr.msg_type ==
+ NVSP_MSG5_TYPE_SUBCHANNEL)) {
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
@@ -469,28 +608,39 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
} else if (nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
int num_outstanding_sends;
+ u16 q_idx = 0;
+ struct vmbus_channel *channel = device->channel;
+ int queue_sends;
/* Get the send context */
nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
packet->trans_id;
/* Notify the layer above us */
- if (nvsc_packet)
- nvsc_packet->completion.send.send_completion(
- nvsc_packet->completion.send.
- send_completion_ctx);
+ if (nvsc_packet) {
+ send_index = nvsc_packet->send_buf_index;
+ if (send_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, send_index);
+ q_idx = nvsc_packet->q_idx;
+ channel = nvsc_packet->channel;
+ nvsc_packet->send_completion(nvsc_packet->
+ send_completion_ctx);
+ }
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
+ queue_sends = atomic_dec_return(&net_device->
+ queue_sends[q_idx]);
if (net_device->destroy && num_outstanding_sends == 0)
wake_up(&net_device->wait_drain);
- if (netif_queue_stopped(ndev) && !net_device->start_remove &&
- (hv_ringbuf_avail_percent(&device->channel->outbound)
- > RING_AVAIL_PERCENT_HIWATER ||
- num_outstanding_sends < 1))
- netif_wake_queue(ndev);
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
+ !net_device->start_remove &&
+ (hv_ringbuf_avail_percent(&channel->outbound) >
+ RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, q_idx));
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -498,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
}
+static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
+{
+ unsigned long index;
+ u32 max_words = net_device->map_words;
+ unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
+ u32 section_cnt = net_device->send_section_cnt;
+ int ret_val = NETVSC_INVALID_INDEX;
+ int i;
+ int prev_val;
+
+ for (i = 0; i < max_words; i++) {
+ if (!~(map_addr[i]))
+ continue;
+ index = ffz(map_addr[i]);
+ prev_val = sync_test_and_set_bit(index, &map_addr[i]);
+ if (prev_val)
+ continue;
+ if ((index + (i * BITS_PER_LONG)) >= section_cnt)
+ break;
+ ret_val = (index + (i * BITS_PER_LONG));
+ break;
+ }
+ return ret_val;
+}
+
+u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ struct hv_netvsc_packet *packet)
+{
+ char *start = net_device->send_buf;
+ char *dest = (start + (section_index * net_device->send_section_size));
+ int i;
+ u32 msg_size = 0;
+
+ for (i = 0; i < packet->page_buf_cnt; i++) {
+ char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
+ u32 offset = packet->page_buf[i].offset;
+ u32 len = packet->page_buf[i].len;
+
+ memcpy(dest, (src + offset), len);
+ msg_size += len;
+ dest += len;
+ }
+ return msg_size;
+}
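
Paired with netvsc_free_send_slot() earlier, these two helpers form a small lock-free slot allocator over the pre-established send buffer. A condensed sketch of the lifecycle that netvsc_send() below and netvsc_send_completion() above implement:

	/* Transmit: grab a free section and copy the packet into it. */
	u32 idx = netvsc_get_next_send_section(net_device);

	if (idx != NETVSC_INVALID_INDEX) {
		msg_size = netvsc_copy_to_send_buf(net_device, idx, packet);
		packet->page_buf_cnt = 0;	/* data now lives in the section */
	}
	packet->send_buf_index = idx;

	/* Completion: return the section to the bitmap. */
	if (packet->send_buf_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, packet->send_buf_index);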
+
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet)
{
@@ -505,7 +701,12 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
+ struct vmbus_channel *out_channel = NULL;
u64 req_id;
+ unsigned int section_index = NETVSC_INVALID_INDEX;
+ u32 msg_size = 0;
+ struct sk_buff *skb;
+
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -521,25 +722,46 @@ int netvsc_send(struct hv_device *device,
sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
}
- /* Not using send buffer section */
+ /* Attempt to send via sendbuf */
+ if (packet->total_data_buflen < net_device->send_section_size) {
+ section_index = netvsc_get_next_send_section(net_device);
+ if (section_index != NETVSC_INVALID_INDEX) {
+ msg_size = netvsc_copy_to_send_buf(net_device,
+ section_index,
+ packet);
+ skb = (struct sk_buff *)
+ (unsigned long)packet->send_completion_tid;
+ if (skb)
+ dev_kfree_skb_any(skb);
+ packet->page_buf_cnt = 0;
+ }
+ }
+ packet->send_buf_index = section_index;
+
+
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
- 0xFFFFFFFF;
- sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+ section_index;
+ sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
- if (packet->completion.send.send_completion)
+ if (packet->send_completion)
req_id = (ulong)packet;
else
req_id = 0;
+ out_channel = net_device->chn_table[packet->q_idx];
+ if (out_channel == NULL)
+ out_channel = device->channel;
+ packet->channel = out_channel;
+
if (packet->page_buf_cnt) {
- ret = vmbus_sendpacket_pagebuffer(device->channel,
+ ret = vmbus_sendpacket_pagebuffer(out_channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
req_id);
} else {
- ret = vmbus_sendpacket(device->channel, &sendMessage,
+ ret = vmbus_sendpacket(out_channel, &sendMessage,
sizeof(struct nvsp_message),
req_id,
VM_PKT_DATA_INBAND,
@@ -548,17 +770,24 @@ int netvsc_send(struct hv_device *device,
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
- if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+ atomic_inc(&net_device->queue_sends[packet->q_idx]);
+
+ if (hv_ringbuf_avail_percent(&out_channel->outbound) <
RING_AVAIL_PERCENT_LOWATER) {
- netif_stop_queue(ndev);
+ netif_tx_stop_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
+
if (atomic_read(&net_device->
- num_outstanding_sends) < 1)
- netif_wake_queue(ndev);
+ queue_sends[packet->q_idx]) < 1)
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
}
} else if (ret == -EAGAIN) {
- netif_stop_queue(ndev);
- if (atomic_read(&net_device->num_outstanding_sends) < 1) {
- netif_wake_queue(ndev);
+ netif_tx_stop_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
+ if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
ret = -ENOSPC;
}
} else {
@@ -570,6 +799,7 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct vmbus_channel *channel,
struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
@@ -587,7 +817,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
retry_send_cmplt:
/* Send the completion */
- ret = vmbus_sendpacket(device->channel, &recvcompMessage,
+ ret = vmbus_sendpacket(channel, &recvcompMessage,
sizeof(struct nvsp_message), transaction_id,
VM_PKT_COMP, 0);
if (ret == 0) {
@@ -613,76 +843,20 @@ retry_send_cmplt:
}
}
-/* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void netvsc_receive_completion(void *context)
-{
- struct hv_netvsc_packet *packet = context;
- struct hv_device *device = packet->device;
- struct netvsc_device *net_device;
- u64 transaction_id = 0;
- bool fsend_receive_comp = false;
- unsigned long flags;
- struct net_device *ndev;
- u32 status = NVSP_STAT_NONE;
-
- /*
- * Even though it seems logical to do a GetOutboundNetDevice() here to
- * send out receive completion, we are using GetInboundNetDevice()
- * since we may have disable outbound traffic already.
- */
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
- ndev = net_device->ndev;
-
- /* Overloading use of the lock. */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-
- if (packet->status != NVSP_STAT_SUCCESS)
- packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
-
- packet->xfer_page_pkt->count--;
-
- /*
- * Last one in the line that represent 1 xfer page packet.
- * Return the xfer page packet itself to the freelist
- */
- if (packet->xfer_page_pkt->count == 0) {
- fsend_receive_comp = true;
- transaction_id = packet->completion.recv.recv_completion_tid;
- status = packet->xfer_page_pkt->status;
- list_add_tail(&packet->xfer_page_pkt->list_ent,
- &net_device->recv_pkt_list);
-
- }
-
- /* Put the packet back */
- list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
- /* Send a receive completion for the xfer page packet */
- if (fsend_receive_comp)
- netvsc_send_recv_completion(device, net_device, transaction_id,
- status);
-
-}
-
static void netvsc_receive(struct netvsc_device *net_device,
+ struct vmbus_channel *channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
{
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
- struct hv_netvsc_packet *netvsc_packet = NULL;
- /* struct netvsc_driver *netvscDriver; */
- struct xferpage_packet *xferpage_packet = NULL;
+ struct hv_netvsc_packet nv_pkt;
+ struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+ u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- unsigned long flags;
struct net_device *ndev;
- LIST_HEAD(listHead);
-
ndev = net_device->ndev;
/*
@@ -715,77 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
return;
}
- /*
- * Grab free packets (range count + 1) to represent this xfer
- * page packet. +1 to represent the xfer page packet itself.
- * We grab it here so that we know exactly how many we can
- * fulfil
- */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
- while (!list_empty(&net_device->recv_pkt_list)) {
- list_move_tail(net_device->recv_pkt_list.next, &listHead);
- if (++count == vmxferpage_packet->range_cnt + 1)
- break;
- }
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
- /*
- * We need at least 2 netvsc pkts (1 to represent the xfer
- * page and at least 1 for the range) i.e. we can handled
- * some of the xfer page packet ranges...
- */
- if (count < 2) {
- netdev_err(ndev, "Got only %d netvsc pkt...needed "
- "%d pkts. Dropping this xfer page packet completely!\n",
- count, vmxferpage_packet->range_cnt + 1);
-
- /* Return it to the freelist */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
- for (i = count; i != 0; i--) {
- list_move_tail(listHead.next,
- &net_device->recv_pkt_list);
- }
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
- flags);
-
- netvsc_send_recv_completion(device, net_device,
- vmxferpage_packet->d.trans_id,
- NVSP_STAT_FAIL);
-
- return;
- }
-
- /* Remove the 1st packet to represent the xfer page packet itself */
- xferpage_packet = (struct xferpage_packet *)listHead.next;
- list_del(&xferpage_packet->list_ent);
- xferpage_packet->status = NVSP_STAT_SUCCESS;
-
- /* This is how much we can satisfy */
- xferpage_packet->count = count - 1;
-
- if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
- netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
- "this xfer page...got %d\n",
- vmxferpage_packet->range_cnt, xferpage_packet->count);
- }
+ count = vmxferpage_packet->range_cnt;
+ netvsc_packet->device = device;
+ netvsc_packet->channel = channel;
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
- for (i = 0; i < (count - 1); i++) {
- netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
- list_del(&netvsc_packet->list_ent);
-
+ for (i = 0; i < count; i++) {
/* Initialize the netvsc packet */
netvsc_packet->status = NVSP_STAT_SUCCESS;
- netvsc_packet->xfer_page_pkt = xferpage_packet;
- netvsc_packet->completion.recv.recv_completion =
- netvsc_receive_completion;
- netvsc_packet->completion.recv.recv_completion_ctx =
- netvsc_packet;
- netvsc_packet->device = device;
- /* Save this so that we can send it back */
- netvsc_packet->completion.recv.recv_completion_tid =
- vmxferpage_packet->d.trans_id;
-
netvsc_packet->data = (void *)((unsigned long)net_device->
recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
@@ -794,16 +905,53 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
- netvsc_receive_completion(netvsc_packet->
- completion.recv.recv_completion_ctx);
+ if (netvsc_packet->status != NVSP_STAT_SUCCESS)
+ status = NVSP_STAT_FAIL;
+ }
+
+ netvsc_send_recv_completion(device, channel, net_device,
+ vmxferpage_packet->d.trans_id, status);
+}
+
+
+static void netvsc_send_table(struct hv_device *hdev,
+ struct vmpacket_descriptor *vmpkt)
+{
+ struct netvsc_device *nvscdev;
+ struct net_device *ndev;
+ struct nvsp_message *nvmsg;
+ int i;
+ u32 count, *tab;
+
+ nvscdev = get_outbound_net_device(hdev);
+ if (!nvscdev)
+ return;
+ ndev = nvscdev->ndev;
+
+ nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
+ (vmpkt->offset8 << 3));
+
+ if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
+ return;
+
+ count = nvmsg->msg.v5_msg.send_table.count;
+ if (count != VRSS_SEND_TAB_SIZE) {
+ netdev_err(ndev, "Received wrong send-table size:%u\n", count);
+ return;
}
+ tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
+ nvmsg->msg.v5_msg.send_table.offset);
+
+ for (i = 0; i < count; i++)
+ nvscdev->send_table[i] = tab[i];
}
-static void netvsc_channel_cb(void *context)
+void netvsc_channel_cb(void *context)
{
int ret;
- struct hv_device *device = context;
+ struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ struct hv_device *device;
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
@@ -812,14 +960,19 @@ static void netvsc_channel_cb(void *context)
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
+ if (channel->primary_channel != NULL)
+ device = channel->primary_channel->device_obj;
+ else
+ device = channel->device_obj;
+
net_device = get_inbound_net_device(device);
if (!net_device)
return;
ndev = net_device->ndev;
- buffer = net_device->cb_buffer;
+ buffer = get_per_channel_state(channel);
do {
- ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
+ ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
&bytes_recvd, &request_id);
if (ret == 0) {
if (bytes_recvd > 0) {
@@ -831,8 +984,12 @@ static void netvsc_channel_cb(void *context)
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(net_device,
- device, desc);
+ netvsc_receive(net_device, channel,
+ device, desc);
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ netvsc_send_table(device, desc);
break;
default:
@@ -880,11 +1037,9 @@ static void netvsc_channel_cb(void *context)
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
int ret = 0;
- int i;
int ring_size =
((struct netvsc_device_info *)additional_info)->ring_size;
struct netvsc_device *net_device;
- struct hv_netvsc_packet *packet, *pos;
struct net_device *ndev;
net_device = alloc_net_device(device);
@@ -893,6 +1048,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
goto cleanup;
}
+ net_device->ring_size = ring_size;
+
/*
* Coming into this function, struct net_device * is
* registered as the driver private data.
@@ -903,24 +1060,14 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
ndev = net_device->ndev;
/* Initialize the NetVSC channel extension */
- spin_lock_init(&net_device->recv_pkt_list_lock);
-
- INIT_LIST_HEAD(&net_device->recv_pkt_list);
-
- for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
- packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
- if (!packet)
- break;
-
- list_add_tail(&packet->list_ent,
- &net_device->recv_pkt_list);
- }
init_completion(&net_device->channel_init_wait);
+ set_per_channel_state(device->channel, net_device->cb_buffer);
+
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb, device);
+ netvsc_channel_cb, device->channel);
if (ret != 0) {
netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -930,6 +1077,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
/* Channel is opened */
pr_info("hv_netvsc channel opened successfully\n");
+ net_device->chn_table[0] = device->channel;
+
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
if (ret != 0) {
@@ -946,16 +1095,8 @@ close:
cleanup:
- if (net_device) {
- list_for_each_entry_safe(packet, pos,
- &net_device->recv_pkt_list,
- list_ent) {
- list_del(&packet->list_ent);
- kfree(packet);
- }
-
+ if (net_device)
kfree(net_device);
- }
return ret;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7918d5132c1f..4fd71b75e666 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -101,7 +101,7 @@ static int netvsc_open(struct net_device *net)
return ret;
}
- netif_start_queue(net);
+ netif_tx_start_all_queues(net);
nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
@@ -149,15 +149,100 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
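+/* Little-endian view: 'ka' aliases the high 32 bits of 'k' (the current
+ * 32-bit key window) and 'kb' the byte just below it (the next key byte),
+ * so "subk.k <<= 1" slides one fresh key bit into the window per data bit.
+ */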
+union sub_key {
+ u64 k;
+ struct {
+ u8 pad[3];
+ u8 kb;
+ u32 ka;
+ };
+};
+
+/* Toeplitz hash function
+ * data: network byte order
+ * return: host byte order
+ */
+static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
+{
+ union sub_key subk;
+ int k_next = 4;
+ u8 dt;
+ int i, j;
+ u32 ret = 0;
+
+ subk.k = 0;
+ subk.ka = ntohl(*(u32 *)key);
+
+ for (i = 0; i < dlen; i++) {
+ subk.kb = key[k_next];
+ k_next = (k_next + 1) % klen;
+ dt = data[i];
+ for (j = 0; j < 8; j++) {
+ if (dt & 0x80)
+ ret ^= subk.ka;
+ dt <<= 1;
+ subk.k <<= 1;
+ }
+ }
+
+ return ret;
+}
+
+static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
+{
+ struct iphdr *iphdr;
+ int data_len;
+ bool ret = false;
+
+ if (eth_hdr(skb)->h_proto != htons(ETH_P_IP))
+ return false;
+
+ iphdr = ip_hdr(skb);
+
+ if (iphdr->version == 4) {
+ if (iphdr->protocol == IPPROTO_TCP)
+ data_len = 12;
+ else
+ data_len = 8;
+ *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
+ (u8 *)&iphdr->saddr, data_len);
+ ret = true;
+ }
+
+ return ret;
+}
+
+static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = net_device_ctx->device_ctx;
+ struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+ u32 hash;
+ u16 q_idx = 0;
+
+ if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
+ return 0;
+
+ if (netvsc_set_hash(&hash, skb)) {
+ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+ ndev->real_num_tx_queues;
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ }
+
+ return q_idx;
+}
+
static void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
struct sk_buff *skb = (struct sk_buff *)
- (unsigned long)packet->completion.send.send_completion_tid;
+ (unsigned long)packet->send_completion_tid;
+ u32 index = packet->send_buf_index;
kfree(packet);
- if (skb)
+ if (skb && (index == NETVSC_INVALID_INDEX))
dev_kfree_skb_any(skb);
}
@@ -301,6 +386,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_lso_info *lso_info;
int hdr_offset;
u32 net_trans_info;
+ u32 hash;
/* We will atmost need two pages to describe the rndis
@@ -319,9 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_data_pgs * sizeof(struct hv_page_buffer)) +
sizeof(struct rndis_message) +
- NDIS_VLAN_PPI_SIZE +
- NDIS_CSUM_PPI_SIZE +
- NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
+ NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
+ NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -333,6 +418,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->vlan_tci = skb->vlan_tci;
+ packet->q_idx = skb_get_queue_mapping(skb);
+
packet->is_data_pkt = true;
packet->total_data_buflen = skb->len;
@@ -341,9 +428,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
(num_data_pgs * sizeof(struct hv_page_buffer)));
/* Set the completion routine */
- packet->completion.send.send_completion = netvsc_xmit_completion;
- packet->completion.send.send_completion_ctx = packet;
- packet->completion.send.send_completion_tid = (unsigned long)skb;
+ packet->send_completion = netvsc_xmit_completion;
+ packet->send_completion_ctx = packet;
+ packet->send_completion_tid = (unsigned long)skb;
isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
@@ -358,6 +445,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+ hash = skb_get_hash_raw(skb);
+ if (hash != 0 && net->real_num_tx_queues > 1) {
+ rndis_msg_size += NDIS_HASH_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+ NBL_HASH_VALUE);
+ *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+ }
+
if (isvlan) {
struct ndis_pkt_8021q_info *vlan;
@@ -558,6 +653,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
packet->vlan_tci);
+ skb_record_rx_queue(skb, packet->channel->
+ offermsg.offer.sub_channel_index);
+
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
@@ -606,7 +704,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
rndis_filter_device_add(hdev, &device_info);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
return 0;
}
@@ -652,6 +750,7 @@ static const struct net_device_ops device_ops = {
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
+ .ndo_select_queue = netvsc_select_queue,
};
/*
@@ -698,9 +797,11 @@ static int netvsc_probe(struct hv_device *dev,
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
struct netvsc_device_info device_info;
+ struct netvsc_device *nvdev;
int ret;
- net = alloc_etherdev(sizeof(struct net_device_context));
+ net = alloc_etherdev_mq(sizeof(struct net_device_context),
+ num_online_cpus());
if (!net)
return -ENOMEM;
@@ -719,7 +820,7 @@ static int netvsc_probe(struct hv_device *dev,
net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_TSO;
- SET_ETHTOOL_OPS(net, &ethtool_ops);
+ net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
/* Notify the netvsc driver of the new device */
@@ -733,6 +834,10 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ nvdev = hv_get_drvdata(dev);
+ netif_set_real_num_tx_queues(net, nvdev->num_chn);
+ netif_set_real_num_rx_queues(net, nvdev->num_chn);
+
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 143a98caf618..99c527adae5b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,7 +31,7 @@
#include "hyperv_net.h"
-#define RNDIS_EXT_LEN 100
+#define RNDIS_EXT_LEN PAGE_SIZE
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -94,6 +94,8 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
rndis_msg->ndis_msg_type = msg_type;
rndis_msg->msg_len = msg_len;
+ request->pkt.q_idx = 0;
+
/*
* Set the request id. This field is always after the rndis header for
* request/response packet types so we just used the SetRequest as a
@@ -234,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
- packet->completion.send.send_completion = NULL;
+ packet->send_completion = NULL;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -399,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->total_data_buflen = rndis_pkt->data_len;
pkt->data = (void *)((unsigned long)pkt->data + data_offset);
- pkt->is_data_pkt = true;
-
vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
if (vlan) {
pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
@@ -509,6 +509,19 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->info_buflen = 0;
query->dev_vc_handle = 0;
+ if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+ struct ndis_recv_scale_cap *cap;
+
+ request->request_msg.msg_len +=
+ sizeof(struct ndis_recv_scale_cap);
+ query->info_buflen = sizeof(struct ndis_recv_scale_cap);
+ cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
+ query->info_buf_offset);
+ cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
+ cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
+ cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
+ }
+
ret = rndis_filter_send_request(dev, request);
if (ret != 0)
goto cleanup;
@@ -695,6 +708,89 @@ cleanup:
return ret;
}
+u8 netvsc_hash_key[HASH_KEYLEN] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
+int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+{
+ struct net_device *ndev = rdev->net_dev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_recv_scale_param) +
+ 4*ITAB_NUM + HASH_KEYLEN;
+ struct ndis_recv_scale_param *rssp;
+ u32 *itab;
+ u8 *keyp;
+ int i, t, ret;
+
+ request = get_rndis_request(
+ rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ rssp = (struct ndis_recv_scale_param *)(set + 1);
+ rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
+ rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
+ rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
+ rssp->flag = 0;
+ rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
+ NDIS_HASH_TCP_IPV4;
+ rssp->indirect_tabsize = 4*ITAB_NUM;
+ rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
+ rssp->hashkey_size = HASH_KEYLEN;
+ rssp->kashkey_offset = rssp->indirect_taboffset +
+ rssp->indirect_tabsize;
+
+ /* Set indirection table entries */
+ itab = (u32 *)(rssp + 1);
+ for (i = 0; i < ITAB_NUM; i++)
+ itab[i] = i % num_queue;
+
+	/* Set hash key values */
+ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
+ for (i = 0; i < HASH_KEYLEN; i++)
+ keyp[i] = netvsc_hash_key[i];
+
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ netdev_err(ndev, "timeout before we got a set response...\n");
+ /* can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -ETIMEDOUT;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+			netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
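The set request built here carries one contiguous info buffer: the ndis_recv_scale_param header, then the indirection table at 4 bytes per entry, then the 40-byte Toeplitz key, with every offset relative to the start of the header. A layout sketch, assuming ITAB_NUM is 128:

u32 taboff = sizeof(struct ndis_recv_scale_param); /* itab follows header */
u32 keyoff = taboff + 4 * ITAB_NUM;                /* key follows itab */
u32 extlen = keyoff + HASH_KEYLEN;                 /* total info_buflen */
/* i.e. [param header][u32 itab[128]][u8 key[40]] */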
+
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
@@ -886,6 +982,28 @@ static int rndis_filter_close_device(struct rndis_device *dev)
return ret;
}
+static void netvsc_sc_open(struct vmbus_channel *new_sc)
+{
+ struct netvsc_device *nvscdev;
+ u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
+ int ret;
+
+ nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
+
+ if (chn_index >= nvscdev->num_chn)
+ return;
+
+ set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
+ NETVSC_PACKET_SIZE);
+
+ ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
+ nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+ netvsc_channel_cb, new_sc);
+
+ if (ret == 0)
+ nvscdev->chn_table[chn_index] = new_sc;
+}
+
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
@@ -894,6 +1012,10 @@ int rndis_filter_device_add(struct hv_device *dev,
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
+ struct nvsp_message *init_packet;
+ int t;
+ struct ndis_recv_scale_cap rsscap;
+ u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -913,6 +1035,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Initialize the rndis device */
net_device = hv_get_drvdata(dev);
+ net_device->num_chn = 1;
net_device->extension = rndis_device;
rndis_device->net_dev = net_device;
@@ -952,7 +1075,6 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto err_dev_remv;
-
rndis_filter_query_device_link_status(rndis_device);
device_info->link_state = rndis_device->link_state;
@@ -961,7 +1083,66 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_device->hw_mac_adr,
device_info->link_state ? "down" : "up");
- return ret;
+ if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
+ return 0;
+
+ /* vRSS setup */
+ memset(&rsscap, 0, rsscap_size);
+ ret = rndis_filter_query_device(rndis_device,
+ OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &rsscap, &rsscap_size);
+ if (ret || rsscap.num_recv_que < 2)
+ goto out;
+
+ net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
+ num_online_cpus() : rsscap.num_recv_que;
+ if (net_device->num_chn == 1)
+ goto out;
+
+ net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
+ NETVSC_PACKET_SIZE);
+ if (!net_device->sub_cb_buf) {
+ net_device->num_chn = 1;
+ dev_info(&dev->device, "No memory for subchannels.\n");
+ goto out;
+ }
+
+ vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
+
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
+ init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
+ init_packet->msg.v5_msg.subchn_req.num_subchannels =
+ net_device->num_chn - 1;
+ ret = vmbus_sendpacket(dev->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto out;
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ if (init_packet->msg.v5_msg.subchn_comp.status !=
+ NVSP_STAT_SUCCESS) {
+ ret = -ENODEV;
+ goto out;
+ }
+ net_device->num_chn = 1 +
+ init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+
+ vmbus_are_subchannels_present(dev->channel);
+
+ ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+
+out:
+ if (ret)
+ net_device->num_chn = 1;
+ return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
rndis_filter_device_remove(dev);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e36f194673a4..50899416f668 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc < 0)
goto err_rx;
- rc = at86rf230_start(dev);
-
- return rc;
-
+ return at86rf230_start(dev);
err_rx:
at86rf230_start(dev);
err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
return at86rf230_isr(irq, data);
}
-static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
-{
- return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
-}
-
static int at86rf230_hw_init(struct at86rf230_local *lp)
{
- struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
- int rc, irq_pol;
- u8 status;
+ int rc, irq_pol, irq_type;
+ u8 dvdd;
u8 csma_seed[2];
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
-
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
if (rc)
return rc;
+ irq_type = irq_get_trigger_type(lp->spi->irq);
/* configure irq polarity, defaults to high active */
- if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+ if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
irq_pol = IRQ_ACTIVE_LOW;
else
irq_pol = IRQ_ACTIVE_HIGH;
- rc = at86rf230_irq_polarity(lp, irq_pol);
+ rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
if (rc)
return rc;
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
	/* Wait for the next SLEEP cycle */
msleep(100);
- rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
if (rc)
return rc;
- if (!status) {
+ if (!dvdd) {
dev_err(&lp->spi->dev, "DVDD error\n");
return -EINVAL;
}
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
at86rf230_get_pdata(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
- const char *irq_type;
if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
- pdata->irq_type = IRQF_TRIGGER_RISING;
- of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
- if (!strcmp(irq_type, "level-high"))
- pdata->irq_type = IRQF_TRIGGER_HIGH;
- else if (!strcmp(irq_type, "level-low"))
- pdata->irq_type = IRQF_TRIGGER_LOW;
- else if (!strcmp(irq_type, "edge-rising"))
- pdata->irq_type = IRQF_TRIGGER_RISING;
- else if (!strcmp(irq_type, "edge-falling"))
- pdata->irq_type = IRQF_TRIGGER_FALLING;
- else
- dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
-
spi->dev.platform_data = pdata;
done:
return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc;
+ int rc, irq_type;
const char *chip;
struct ieee802154_ops *ops = NULL;
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
}
if (gpio_is_valid(pdata->rstn)) {
- rc = gpio_request(pdata->rstn, "rstn");
+ rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+ GPIOF_OUT_INIT_HIGH, "rstn");
if (rc)
return rc;
}
if (gpio_is_valid(pdata->slp_tr)) {
- rc = gpio_request(pdata->slp_tr, "slp_tr");
- if (rc)
- goto err_slp_tr;
- }
-
- if (gpio_is_valid(pdata->rstn)) {
- rc = gpio_direction_output(pdata->rstn, 1);
- if (rc)
- goto err_gpio_dir;
- }
-
- if (gpio_is_valid(pdata->slp_tr)) {
- rc = gpio_direction_output(pdata->slp_tr, 0);
+ rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+ GPIOF_OUT_INIT_LOW, "slp_tr");
if (rc)
- goto err_gpio_dir;
+ return rc;
}
/* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
if (rc < 0)
- goto err_gpio_dir;
+ return rc;
if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
man_id >> 8, man_id & 0xFF);
- rc = -EINVAL;
- goto err_gpio_dir;
+ return -EINVAL;
}
switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
}
dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
- if (!ops) {
- rc = -ENOTSUPP;
- goto err_gpio_dir;
- }
+ if (!ops)
+ return -ENOTSUPP;
dev = ieee802154_alloc_device(sizeof(*lp), ops);
- if (!dev) {
- rc = -ENOMEM;
- goto err_gpio_dir;
- }
+ if (!dev)
+ return -ENOMEM;
lp = dev->priv;
lp->dev = dev;
@@ -1176,7 +1136,10 @@ static int at86rf230_probe(struct spi_device *spi)
dev->extra_tx_headroom = 0;
dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (!irq_type)
+ irq_type = IRQF_TRIGGER_RISING;
+ if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
irq_worker = at86rf230_irqwork;
irq_handler = at86rf230_isr;
} else {
@@ -1202,75 +1165,66 @@ static int at86rf230_probe(struct spi_device *spi)
if (rc)
goto err_hw_init;
- rc = request_irq(spi->irq, irq_handler,
- IRQF_SHARED | pdata->irq_type,
- dev_name(&spi->dev), lp);
+ /* Read irq status register to reset irq line */
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
if (rc)
goto err_hw_init;
- /* Read irq status register to reset irq line */
- rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
+ rc = devm_request_irq(&spi->dev, spi->irq, irq_handler,
+ IRQF_SHARED | irq_type,
+ dev_name(&spi->dev), lp);
if (rc)
- goto err_irq;
+ goto err_hw_init;
rc = ieee802154_register_device(lp->dev);
if (rc)
- goto err_irq;
+ goto err_hw_init;
return rc;
-err_irq:
- free_irq(spi->irq, lp);
err_hw_init:
flush_work(&lp->irqwork);
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
-err_gpio_dir:
- if (gpio_is_valid(pdata->slp_tr))
- gpio_free(pdata->slp_tr);
-err_slp_tr:
- if (gpio_is_valid(pdata->rstn))
- gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
/* mask all at86rf230 irq's */
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_device(lp->dev);
-
- free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
-
- if (gpio_is_valid(pdata->slp_tr))
- gpio_free(pdata->slp_tr);
- if (gpio_is_valid(pdata->rstn))
- gpio_free(pdata->rstn);
-
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
-
dev_dbg(&spi->dev, "unregistered at86rf230\n");
+
return 0;
}
-#if IS_ENABLED(CONFIG_OF)
-static struct of_device_id at86rf230_of_match[] = {
+static const struct of_device_id at86rf230_of_match[] = {
{ .compatible = "atmel,at86rf230", },
{ .compatible = "atmel,at86rf231", },
{ .compatible = "atmel,at86rf233", },
{ .compatible = "atmel,at86rf212", },
{ },
};
-#endif
+MODULE_DEVICE_TABLE(of, at86rf230_of_match);
+
+static const struct spi_device_id at86rf230_device_id[] = {
+ { .name = "at86rf230", },
+ { .name = "at86rf231", },
+ { .name = "at86rf233", },
+ { .name = "at86rf212", },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
static struct spi_driver at86rf230_driver = {
+ .id_table = at86rf230_device_id,
.driver = {
.of_match_table = of_match_ptr(at86rf230_of_match),
.name = "at86rf230",
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index b8d22173925d..27d83207d24c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -26,6 +26,7 @@
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include <linux/device.h>
#include <linux/spinlock.h>
#include <net/mac802154.h>
#include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
int err = -ENOMEM;
int i;
- priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
+ GFP_KERNEL);
if (!priv)
goto err_alloc;
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
err_slave:
list_for_each_entry(dp, &priv->list, list)
fakelb_del(dp);
- kfree(priv);
err_alloc:
return err;
}
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
list_for_each_entry_safe(dp, temp, &priv->list, list)
fakelb_del(dp);
- kfree(priv);
return 0;
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 78a6552ed707..4048062011ba 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -618,12 +618,12 @@ static int mrf24j40_probe(struct spi_device *spi)
printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
- devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
+ devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL);
if (!devrec)
- goto err_devrec;
- devrec->buf = kzalloc(3, GFP_KERNEL);
+ goto err_ret;
+ devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL);
if (!devrec->buf)
- goto err_buf;
+ goto err_ret;
spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
@@ -638,7 +638,7 @@ static int mrf24j40_probe(struct spi_device *spi)
devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
if (!devrec->dev)
- goto err_alloc_dev;
+ goto err_ret;
devrec->dev->priv = devrec;
devrec->dev->parent = &devrec->spi->dev;
@@ -676,12 +676,13 @@ static int mrf24j40_probe(struct spi_device *spi)
val &= ~0x3; /* Clear RX mode (normal) */
write_short_reg(devrec, REG_RXMCR, val);
- ret = request_threaded_irq(spi->irq,
- NULL,
- mrf24j40_isr,
- IRQF_TRIGGER_LOW|IRQF_ONESHOT,
- dev_name(&spi->dev),
- devrec);
+ ret = devm_request_threaded_irq(&spi->dev,
+ spi->irq,
+ NULL,
+ mrf24j40_isr,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ dev_name(&spi->dev),
+ devrec);
if (ret) {
dev_err(printdev(devrec), "Unable to get IRQ");
@@ -695,11 +696,7 @@ err_read_reg:
ieee802154_unregister_device(devrec->dev);
err_register_device:
ieee802154_free_device(devrec->dev);
-err_alloc_dev:
- kfree(devrec->buf);
-err_buf:
- kfree(devrec);
-err_devrec:
+err_ret:
return ret;
}
@@ -709,15 +706,11 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
- free_irq(spi->irq, devrec);
ieee802154_unregister_device(devrec->dev);
ieee802154_free_device(devrec->dev);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
* complete? */
- /* Clean up the SPI stuff. */
- kfree(devrec->buf);
- kfree(devrec);
return 0;
}
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 3da44d5d9149..8d101d63abca 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -396,7 +396,8 @@ config MCS_FIR
config SH_IRDA
tristate "SuperH IrDA driver"
- depends on IRDA && ARCH_SHMOBILE
+ depends on IRDA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
	  Say Y here if you want to enable SuperH IrDA devices.
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2900af091c2d..998bb89ede71 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -510,10 +510,8 @@ static void via_hw_init(struct via_ircc_cb *self)
*/
static int via_ircc_read_dongle_id(int iobase)
{
- int dongle_id = 9; /* Default to IBM */
-
IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
- return dongle_id;
+ return 9; /* Default to IBM */
}
/*
@@ -926,7 +924,6 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
int iobase;
- int ret = TRUE;
u8 Tx_status;
IRDA_DEBUG(3, "%s()\n", __func__);
@@ -983,7 +980,7 @@ F01_E*/
// Tell the network layer, that we can accept more frames
netif_wake_queue(self->netdev);
//F01 }
- return ret;
+ return TRUE;
}
/*
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index e641bb240362..11dbdf36d9c1 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -62,10 +62,6 @@
#include "w83977af.h"
#include "w83977af_ir.h"
-#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
-#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
-#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
-#endif
#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
w977_write_reg(0x74, dma+1, efbase[i]);
#else
w977_write_reg(0x74, dma, efbase[i]);
-#endif /*CONFIG_ARCH_NETWINDER */
+#endif /* CONFIG_ARCH_NETWINDER */
w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
/* Set append hardware CRC, enable IR bank selection */
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- unsigned long flags;
- __u8 hcr;
-#endif
IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
/* Choose transmit DMA channel */
switch_bank(iobase, SET2);
outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- spin_lock_irqsave(&self->lock, flags);
-
- disable_dma(self->io.dma);
- clear_dma_ff(self->io.dma);
- set_dma_mode(self->io.dma, DMA_MODE_READ);
- set_dma_addr(self->io.dma, self->tx_buff_dma);
- set_dma_count(self->io.dma, self->tx_buff.len);
-#else
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
-#endif
self->io.direction = IO_XMIT;
/* Enable DMA */
switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- hcr = inb(iobase+HCR);
- outb(hcr | HCR_EN_DMA, iobase+HCR);
- enable_dma(self->io.dma);
- spin_unlock_irqrestore(&self->lock, flags);
-#else
outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
-#endif
/* Restore set register */
outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
{
int iobase;
__u8 set;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
unsigned long flags;
__u8 hcr;
#endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
/* Enable DMA */
switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d53e299ae1d9..958df383068a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -30,8 +30,10 @@
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
+#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
+#include <linux/netpoll.h>
#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -40,10 +42,19 @@ struct macvlan_port {
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct rcu_head rcu;
+ struct sk_buff_head bc_queue;
+ struct work_struct bc_work;
bool passthru;
- int count;
};
+#define MACVLAN_PORT_IS_EMPTY(port) list_empty(&port->vlans)
+
+struct macvlan_skb_cb {
+ const struct macvlan_dev *src;
+};
+
+#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
+
static void macvlan_port_destroy(struct net_device *dev);
static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@ -120,7 +131,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
struct net_device *dev = vlan->dev;
if (local)
- return dev_forward_skb(dev, skb);
+ return __dev_forward_skb(dev, skb);
skb->dev = dev;
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +139,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return 0;
}
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -175,32 +186,32 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (likely(nskb))
err = macvlan_broadcast_one(
nskb, vlan, eth,
- mode == MACVLAN_MODE_BRIDGE);
+ mode == MACVLAN_MODE_BRIDGE) ?:
+ netif_rx_ni(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
}
}
}
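The `?:` form used above is GCC's conditional-with-omitted-middle extension: the left operand is evaluated once and used as the result when non-zero, otherwise the right operand is evaluated. So netif_rx_ni() runs only when macvlan_broadcast_one() returned 0. A tiny sketch with hypothetical helpers:

/* Equivalent to: tmp = do_prepare(); err = tmp ? tmp : do_send();
 * do_send() runs only when do_prepare() returned 0.
 */
int err = do_prepare() ?: do_send();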
-/* called under rcu_read_lock() from netif_receive_skb */
-static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+static void macvlan_process_broadcast(struct work_struct *w)
{
- struct macvlan_port *port;
- struct sk_buff *skb = *pskb;
- const struct ethhdr *eth = eth_hdr(skb);
- const struct macvlan_dev *vlan;
- const struct macvlan_dev *src;
- struct net_device *dev;
- unsigned int len = 0;
- int ret = NET_RX_DROP;
+ struct macvlan_port *port = container_of(w, struct macvlan_port,
+ bc_work);
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+
+ skb_queue_head_init(&list);
+
+ spin_lock_bh(&port->bc_queue.lock);
+ skb_queue_splice_tail_init(&port->bc_queue, &list);
+ spin_unlock_bh(&port->bc_queue.lock);
+
+ while ((skb = __skb_dequeue(&list))) {
+ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+ rcu_read_lock();
- port = macvlan_port_get_rcu(skb->dev);
- if (is_multicast_ether_addr(eth->h_dest)) {
- skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
- if (!skb)
- return RX_HANDLER_CONSUMED;
- eth = eth_hdr(skb);
- src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */
macvlan_broadcast(skb, port, NULL,
@@ -213,20 +224,80 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA |
MACVLAN_MODE_BRIDGE);
- else if (src->mode == MACVLAN_MODE_BRIDGE)
+ else
/*
* flood only to VEPA ports, bridge ports
* already saw the frame on the way out.
*/
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA);
- else {
+
+ rcu_read_unlock();
+
+ kfree_skb(skb);
+ }
+}
+
+static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int err = -ENOMEM;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto err;
+
+ spin_lock(&port->bc_queue.lock);
+ if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+ __skb_queue_tail(&port->bc_queue, nskb);
+ err = 0;
+ }
+ spin_unlock(&port->bc_queue.lock);
+
+ if (err)
+ goto free_nskb;
+
+ schedule_work(&port->bc_work);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+err:
+ atomic_long_inc(&skb->dev->rx_dropped);
+}
+
+/* called under rcu_read_lock() from netif_receive_skb */
+static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+{
+ struct macvlan_port *port;
+ struct sk_buff *skb = *pskb;
+ const struct ethhdr *eth = eth_hdr(skb);
+ const struct macvlan_dev *vlan;
+ const struct macvlan_dev *src;
+ struct net_device *dev;
+ unsigned int len = 0;
+ int ret = NET_RX_DROP;
+
+ port = macvlan_port_get_rcu(skb->dev);
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+ eth = eth_hdr(skb);
+ src = macvlan_hash_lookup(port, eth->h_source);
+ if (src && src->mode != MACVLAN_MODE_VEPA &&
+ src->mode != MACVLAN_MODE_BRIDGE) {
/* forward to original port. */
vlan = src;
- ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+ ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
+ netif_rx(skb);
goto out;
}
+ MACVLAN_SKB_CB(skb)->src = src;
+ macvlan_broadcast_enqueue(port, skb);
+
return RX_HANDLER_PASS;
}
@@ -287,12 +358,26 @@ xmit_world:
return dev_queue_xmit(skb);
}
+static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (vlan->netpoll)
+ netpoll_send_skb(vlan->netpoll, skb);
+#else
+ BUG();
+#endif
+ return NETDEV_TX_OK;
+}
+
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
unsigned int len = skb->len;
int ret;
- const struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ if (unlikely(netpoll_tx_running(dev)))
+ return macvlan_netpoll_send_skb(vlan, skb);
if (vlan->fwd_priv) {
skb->dev = vlan->lowerdev;
@@ -424,35 +509,49 @@ hash_del:
return 0;
}
-static int macvlan_set_mac_address(struct net_device *dev, void *p)
+static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- struct sockaddr *addr = p;
int err;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, addr);
} else {
/* Rehash and update the device filters */
- if (macvlan_addr_busy(vlan->port, addr->sa_data))
+ if (macvlan_addr_busy(vlan->port, addr))
return -EBUSY;
- err = dev_uc_add(lowerdev, addr->sa_data);
- if (err)
- return err;
+ if (!vlan->port->passthru) {
+ err = dev_uc_add(lowerdev, addr);
+ if (err)
+ return err;
- dev_uc_del(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
+ }
- macvlan_hash_change_addr(vlan, addr->sa_data);
+ macvlan_hash_change_addr(vlan, addr);
}
return 0;
}
+static int macvlan_set_mac_address(struct net_device *dev, void *p)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+ dev_set_mac_address(vlan->lowerdev, addr);
+ return 0;
+ }
+
+ return macvlan_sync_address(dev, addr->sa_data);
+}
+
static void macvlan_change_rx_flags(struct net_device *dev, int change)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,8 +666,7 @@ static void macvlan_uninit(struct net_device *dev)
free_percpu(vlan->pcpu_stats);
- port->count -= 1;
- if (!port->count)
+ if (MACVLAN_PORT_IS_EMPTY(port))
macvlan_port_destroy(port->dev);
}
@@ -705,6 +803,50 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
return features;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void macvlan_dev_poll_controller(struct net_device *dev)
+{
+ return;
+}
+
+static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *real_dev = vlan->lowerdev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!netpoll)
+ goto out;
+
+ err = __netpoll_setup(netpoll, real_dev);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ vlan->netpoll = netpoll;
+
+out:
+ return err;
+}
+
+static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct netpoll *netpoll = vlan->netpoll;
+
+ if (!netpoll)
+ return;
+
+ vlan->netpoll = NULL;
+
+ __netpoll_free_async(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = macvlan_ethtool_get_settings,
@@ -730,6 +872,11 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
.ndo_get_lock_subclass = macvlan_get_nest_level,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = macvlan_dev_poll_controller,
+ .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
+ .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
+#endif
};
void macvlan_common_setup(struct net_device *dev)
@@ -770,6 +917,9 @@ static int macvlan_port_create(struct net_device *dev)
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_hash[i]);
+ skb_queue_head_init(&port->bc_queue);
+ INIT_WORK(&port->bc_work, macvlan_process_broadcast);
+
err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
if (err)
kfree(port);
@@ -782,6 +932,7 @@ static void macvlan_port_destroy(struct net_device *dev)
{
struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ cancel_work_sync(&port->bc_work);
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
kfree_rcu(port, rcu);
@@ -868,13 +1019,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
- if (port->count)
+ if (!MACVLAN_PORT_IS_EMPTY(port))
return -EINVAL;
port->passthru = true;
eth_hw_addr_inherit(dev, lowerdev);
}
- port->count += 1;
err = register_netdevice(dev);
if (err < 0)
goto destroy_port;
@@ -892,8 +1042,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
unregister_netdev:
unregister_netdevice(dev);
destroy_port:
- port->count -= 1;
- if (!port->count)
+ if (MACVLAN_PORT_IS_EMPTY(port))
macvlan_port_destroy(lowerdev);
return err;
@@ -1028,6 +1177,25 @@ static int macvlan_device_event(struct notifier_block *unused,
netdev_update_features(vlan->dev);
}
break;
+ case NETDEV_CHANGEMTU:
+ list_for_each_entry(vlan, &port->vlans, list) {
+ if (vlan->dev->mtu <= dev->mtu)
+ continue;
+ dev_set_mtu(vlan->dev, dev->mtu);
+ }
+ break;
+ case NETDEV_CHANGEADDR:
+ if (!port->passthru)
+ return NOTIFY_DONE;
+
+ vlan = list_first_entry_or_null(&port->vlans,
+ struct macvlan_dev,
+ list);
+
+ if (macvlan_sync_address(vlan->dev, dev->dev_addr))
+ return NOTIFY_BAD;
+
+ break;
case NETDEV_UNREGISTER:
/* twiddle thumbs on netns device moves */
if (dev->reg_state != NETREG_UNREGISTERING)
@@ -1036,11 +1204,17 @@ static int macvlan_device_event(struct notifier_block *unused,
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
unregister_netdevice_many(&list_kill);
- list_del(&list_kill);
break;
case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid the underlying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to all vlans */
+ list_for_each_entry(vlan, &port->vlans, list)
+ call_netdevice_notifiers(event, vlan->dev);
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 63aa9d9e34c5..5a7e6397440a 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -298,7 +298,6 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = SUPPORTED_Backplane;
cmd->advertising = ADVERTISED_Backplane;
- cmd->speed = SPEED_UNKNOWN;
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_OTHER;
@@ -348,7 +347,7 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
ndev->netdev_ops = &ntb_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+ ndev->ethtool_ops = &ntb_ethtool_ops;
dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
if (!dev->qp) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6a17f92153b3..65de0cab8d07 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,6 +24,12 @@ config AMD_PHY
---help---
Currently supports the am79c874
+config AMD_XGBE_PHY
+ tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
+ depends on OF
+ ---help---
+ Currently supports the AMD 10GbE PHY
+
config MARVELL_PHY
tristate "Drivers for Marvell PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 07d24024863e..7dc3d5b304cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
new file mode 100644
index 000000000000..b57c22442867
--- /dev/null
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -0,0 +1,1357 @@
+/*
+ * AMD 10Gb Ethernet PHY driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0.0-a");
+MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+
+#define XGBE_PHY_ID 0x000162d0
+#define XGBE_PHY_MASK 0xfffffff0
+
+#define XGBE_AN_INT_CMPLT 0x01
+#define XGBE_AN_INC_LINK 0x02
+#define XGBE_AN_PG_RCV 0x04
+
+#define XNP_MCF_NULL_MESSAGE 0x001
+#define XNP_ACK_PROCESSED (1 << 12)
+#define XNP_MP_FORMATTED (1 << 13)
+#define XNP_NP_EXCHANGE (1 << 15)
+
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+#ifndef MDIO_PMA_10GBR_FEC_CTRL
+#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
+#endif
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* SerDes integration register offsets */
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
+#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+#define SPEED_10000_CDR 0x7
+#define SPEED_10000_PLL 0x1
+#define SPEED_10000_RATE 0x0
+#define SPEED_10000_TXAMP 0xa
+#define SPEED_10000_WORD 0x7
+
+#define SPEED_2500_CDR 0x2
+#define SPEED_2500_PLL 0x0
+#define SPEED_2500_RATE 0x2
+#define SPEED_2500_TXAMP 0xf
+#define SPEED_2500_WORD 0x1
+
+#define SPEED_1000_CDR 0x2
+#define SPEED_1000_PLL 0x0
+#define SPEED_1000_RATE 0x3
+#define SPEED_1000_TXAMP 0xf
+#define SPEED_1000_WORD 0x1
+
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG20 0x0050
+#define RXTX_REG114 0x01c8
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+
+#define RXTX_10000_BLWC 0
+#define RXTX_10000_PQ 0x1e
+
+#define RXTX_2500_BLWC 1
+#define RXTX_2500_PQ 0xa
+
+#define RXTX_1000_BLWC 1
+#define RXTX_1000_PQ 0xa
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
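A worked example of the pair, using the SIR1_SPEED DATARATE field (index 4, width 2) defined above with SPEED_2500_RATE (0x2):

u16 reg_val = 0x00f3;
SET_BITS(reg_val, 4, 2, SPEED_2500_RATE);
/* mask = ((1 << 2) - 1) << 4 = 0x0030
 * 0x00f3 & ~0x0030 = 0x00c3; (0x2 & 0x3) << 4 = 0x0020
 * so reg_val == 0x00e3, and GET_BITS(reg_val, 4, 2) == 0x2
 */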
+
+/* Macros for reading or writing SerDes integration registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XSIR0_IOREAD(_priv, _reg) \
+ ioread16((_priv)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_priv, _reg) \
+ ioread16((_priv)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
+
+
+/* Macros for reading or writing SerDes RxTx registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XRXTX_IOREAD(_priv, _reg) \
+ ioread16((_priv)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
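Each accessor family composes register and field names by token pasting (`_reg##_##_field##_INDEX`), so a call such as the one below, used later in amd_xgbe_phy_serdes_start_ratechange(), expands to a 16-bit read-modify-write:

/* Reads the register at priv->sir1_regs + SIR1_SPEED, sets the
 * one-bit field at SIR1_SPEED_RATECHANGE_INDEX (bit 6) via
 * SET_BITS(), and writes the result back with iowrite16().
 */
XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);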
+
+
+enum amd_xgbe_phy_an {
+ AMD_XGBE_AN_READY = 0,
+ AMD_XGBE_AN_START,
+ AMD_XGBE_AN_EVENT,
+ AMD_XGBE_AN_PAGE_RECEIVED,
+ AMD_XGBE_AN_INCOMPAT_LINK,
+ AMD_XGBE_AN_COMPLETE,
+ AMD_XGBE_AN_NO_LINK,
+ AMD_XGBE_AN_EXIT,
+ AMD_XGBE_AN_ERROR,
+};
+
+enum amd_xgbe_phy_rx {
+ AMD_XGBE_RX_READY = 0,
+ AMD_XGBE_RX_BPA,
+ AMD_XGBE_RX_XNP,
+ AMD_XGBE_RX_COMPLETE,
+};
+
+enum amd_xgbe_phy_mode {
+ AMD_XGBE_MODE_KR,
+ AMD_XGBE_MODE_KX,
+};
+
+struct amd_xgbe_phy_priv {
+ struct platform_device *pdev;
+ struct device *dev;
+
+ struct phy_device *phydev;
+
+ /* SerDes related mmio resources */
+ struct resource *rxtx_res;
+ struct resource *sir0_res;
+ struct resource *sir1_res;
+
+ /* SerDes related mmio registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+
+ /* Maintain link status for re-starting auto-negotiation */
+ unsigned int link;
+ enum amd_xgbe_phy_mode mode;
+
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum amd_xgbe_phy_an an_result;
+ enum amd_xgbe_phy_an an_state;
+ enum amd_xgbe_phy_rx kr_state;
+ enum amd_xgbe_phy_rx kx_state;
+ struct work_struct an_work;
+ struct workqueue_struct *an_workqueue;
+};
+
+static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret |= 0x02;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~0x02;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ usleep_range(75, 100);
+
+ ret &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ return 0;
+}
+
+static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) &&
+ !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY))
+ usleep_range(10, 20);
+}
+
+static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Enable KR training */
+ ret = amd_xgbe_an_enable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KR/10G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBR;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED10G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 10G speed */
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ priv->mode = AMD_XGBE_MODE_KR;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KX/1G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED1G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 2.5G speed */
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ priv->mode = AMD_XGBE_MODE_KX;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KX/1G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED1G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 1G speed */
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ priv->mode = AMD_XGBE_MODE_KX;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* If we are in KR switch to KX, and vice-versa */
+ if (priv->mode == AMD_XGBE_MODE_KR)
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ else
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+
+ return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return AMD_XGBE_AN_START;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ad_reg, lp_reg, ret;
+
+ *state = AMD_XGBE_RX_COMPLETE;
+
+ /* If we're in KX mode then we're done */
+ if (priv->mode == AMD_XGBE_MODE_KX)
+ return AMD_XGBE_AN_EVENT;
+
+ /* Enable/Disable FEC */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ ret |= 0x01;
+ else
+ ret &= ~0x01;
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
+
+ /* Start KR training */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret |= 0x01;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return AMD_XGBE_AN_EVENT;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ u16 msg;
+
+ *state = AMD_XGBE_RX_XNP;
+
+ msg = XNP_MCF_NULL_MESSAGE;
+ msg |= XNP_MP_FORMATTED;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AMD_XGBE_AN_EVENT;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ unsigned int link_support;
+ int ret, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
+ if (!(ret & link_support))
+ return amd_xgbe_an_switch_mode(phydev);
+
+ /* Check Extended Next Page support */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
+ amd_xgbe_an_tx_xnp(phydev, state) :
+ amd_xgbe_an_tx_training(phydev, state);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
+ amd_xgbe_an_tx_xnp(phydev, state) :
+ amd_xgbe_an_tx_training(phydev, state);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Be sure we aren't looping trying to negotiate */
+ if (priv->mode == AMD_XGBE_MODE_KR) {
+ if (priv->kr_state != AMD_XGBE_RX_READY)
+ return AMD_XGBE_AN_NO_LINK;
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ } else {
+ if (priv->kx_state != AMD_XGBE_RX_READY)
+ return AMD_XGBE_AN_NO_LINK;
+ priv->kx_state = AMD_XGBE_RX_BPA;
+ }
+
+ /* Set up Advertisement register 3 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (phydev->supported & SUPPORTED_10000baseR_FEC)
+ ret |= 0xc000;
+ else
+ ret &= ~0xc000;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
+
+ /* Set up Advertisement register 2 next */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret |= 0x80;
+ else
+ ret &= ~0x80;
+
+ if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ ret |= 0x20;
+ else
+ ret &= ~0x20;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
+
+ /* Set up Advertisement register 1 last */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (phydev->supported & SUPPORTED_Pause)
+ ret |= 0x400;
+ else
+ ret &= ~0x400;
+
+ if (phydev->supported & SUPPORTED_Asym_Pause)
+ ret |= 0x800;
+ else
+ ret &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ ret &= ~XNP_NP_EXCHANGE;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
+
+ /* Enable and start auto-negotiation */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret |= MDIO_AN_CTRL1_ENABLE;
+ ret |= MDIO_AN_CTRL1_RESTART;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+ return AMD_XGBE_AN_EVENT;
+}
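
The magic numbers in amd_xgbe_an_start() track the IEEE 802.3 Clause 73 base-page layout (a reading of the standard rather than anything the driver spells out): in Advertisement register 1, bits 10/11 (0x400/0x800) are the C0/C1 pause and asymmetric-pause bits; in register 2, bit 5 (0x20) advertises 1000BASE-KX and bit 7 (0x80) advertises 10GBASE-KR; and in register 3, bits 14/15 (0xc000) request 10GBASE-R FEC.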
+
+static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
+{
+ enum amd_xgbe_phy_an new_state;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ new_state = AMD_XGBE_AN_EVENT;
+ if (ret & XGBE_AN_PG_RCV)
+ new_state = AMD_XGBE_AN_PAGE_RECEIVED;
+ else if (ret & XGBE_AN_INC_LINK)
+ new_state = AMD_XGBE_AN_INCOMPAT_LINK;
+ else if (ret & XGBE_AN_INT_CMPLT)
+ new_state = AMD_XGBE_AN_COMPLETE;
+
+ if (new_state != AMD_XGBE_AN_EVENT)
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return new_state;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_rx *state;
+ int ret;
+
+ state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
+ : &priv->kx_state;
+
+ switch (*state) {
+ case AMD_XGBE_RX_BPA:
+ ret = amd_xgbe_an_rx_bpa(phydev, state);
+ break;
+
+ case AMD_XGBE_RX_XNP:
+ ret = amd_xgbe_an_rx_xnp(phydev, state);
+ break;
+
+ default:
+ ret = AMD_XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
+{
+ return amd_xgbe_an_switch_mode(phydev);
+}
+
+static void amd_xgbe_an_state_machine(struct work_struct *work)
+{
+ struct amd_xgbe_phy_priv *priv = container_of(work,
+ struct amd_xgbe_phy_priv,
+ an_work);
+ struct phy_device *phydev = priv->phydev;
+ enum amd_xgbe_phy_an cur_state;
+ int sleep;
+
+ while (1) {
+ mutex_lock(&priv->an_mutex);
+
+ cur_state = priv->an_state;
+
+ switch (priv->an_state) {
+ case AMD_XGBE_AN_START:
+ priv->an_state = amd_xgbe_an_start(phydev);
+ break;
+
+ case AMD_XGBE_AN_EVENT:
+ priv->an_state = amd_xgbe_an_event(phydev);
+ break;
+
+ case AMD_XGBE_AN_PAGE_RECEIVED:
+ priv->an_state = amd_xgbe_an_page_received(phydev);
+ break;
+
+ case AMD_XGBE_AN_INCOMPAT_LINK:
+ priv->an_state = amd_xgbe_an_incompat_link(phydev);
+ break;
+
+ case AMD_XGBE_AN_COMPLETE:
+ case AMD_XGBE_AN_NO_LINK:
+ case AMD_XGBE_AN_EXIT:
+ goto exit_unlock;
+
+ default:
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ }
+
+ if (priv->an_state == AMD_XGBE_AN_ERROR) {
+ netdev_err(phydev->attached_dev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+ goto exit_unlock;
+ }
+
+ sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
+
+ mutex_unlock(&priv->an_mutex);
+
+ if (sleep)
+ usleep_range(20, 50);
+ }
+
+exit_unlock:
+ priv->an_result = priv->an_state;
+ priv->an_state = AMD_XGBE_AN_READY;
+
+ mutex_unlock(&priv->an_mutex);
+}
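
The worker above is the entire auto-negotiation engine: it loops under an_mutex, dispatching one state handler per pass, until it lands in a terminal state (COMPLETE, NO_LINK, EXIT, or the ERROR path). Only AMD_XGBE_AN_EVENT passes poll the interrupt register, so the loop sleeps briefly (usleep_range(20, 50)) between those polls rather than hammering the MDIO bus; on exit the outcome is latched in an_result and an_state returns to READY so amd_xgbe_phy_config_aneg() can kick it off again.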
+
+static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
+{
+ int count, ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_CTRL1_RESET;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ count = 50;
+ do {
+ msleep(20);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while ((ret & MDIO_CTRL1_RESET) && --count);
+
+ if (ret & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_config_init(struct phy_device *phydev)
+{
+ /* Initialize supported features */
+ phydev->supported = SUPPORTED_Autoneg;
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->supported |= SUPPORTED_Backplane;
+ phydev->supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_2500baseX_Full;
+ phydev->supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseR_FEC;
+ phydev->advertising = phydev->supported;
+
+ /* Turn off and clear interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable auto-negotiation */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_AN_CTRL1_ENABLE;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+ /* Validate/Set specified speed */
+ switch (phydev->speed) {
+ case SPEED_10000:
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ break;
+
+ case SPEED_2500:
+ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+ break;
+
+ case SPEED_1000:
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* Validate duplex mode */
+ if (phydev->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return amd_xgbe_phy_setup_forced(phydev);
+
+ /* Make sure we have the AN MMD present */
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+ /* Get the current speed mode */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ /* Start/Restart the auto-negotiation state machine */
+ mutex_lock(&priv->an_mutex);
+ priv->an_result = AMD_XGBE_AN_READY;
+ priv->an_state = AMD_XGBE_AN_START;
+ priv->kr_state = AMD_XGBE_RX_READY;
+ priv->kx_state = AMD_XGBE_RX_READY;
+ mutex_unlock(&priv->an_mutex);
+
+ queue_work(priv->an_workqueue, &priv->an_work);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_an state;
+
+ mutex_lock(&priv->an_mutex);
+ state = priv->an_result;
+ mutex_unlock(&priv->an_mutex);
+
+ return (state == AMD_XGBE_AN_COMPLETE);
+}
+
+static int amd_xgbe_phy_update_link(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_an state;
+ unsigned int check_again, autoneg;
+ int ret;
+
+ /* If we're doing auto-negotiation don't report link down */
+ mutex_lock(&priv->an_mutex);
+ state = priv->an_state;
+ mutex_unlock(&priv->an_mutex);
+
+ if (state != AMD_XGBE_AN_READY) {
+ phydev->link = 1;
+ return 0;
+ }
+
+ /* Since the device can be in the wrong mode when a link is
+ * (re-)established (cable connected after the interface is
+ * up, etc.), the link status may report no link. If there
+ * is no link, try switching modes and checking the status
+ * again.
+ */
+ check_again = 1;
+again:
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (!phydev->link) {
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (check_again) {
+ check_again = 0;
+ goto again;
+ }
+ }
+
+ autoneg = (phydev->link && !priv->link) ? 1 : 0;
+ priv->link = phydev->link;
+ if (autoneg) {
+ /* Link is (back) up, re-start auto-negotiation */
+ ret = amd_xgbe_phy_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
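
The read-twice sequence in amd_xgbe_phy_update_link() follows from the latched-low comment above and is worth a helper in drivers that need it in more than one place. A minimal sketch, reusing the clause-45 accessors already used in this file (the helper name is illustrative, not part of the driver):

	static int example_read_latched_link(struct phy_device *phydev)
	{
		int ret;

		/* first read returns the latched value and clears the latch */
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
		if (ret < 0)
			return ret;

		/* second read reflects the live link state */
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
		if (ret < 0)
			return ret;

		return !!(ret & MDIO_STAT1_LSTATUS);
	}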
+
+static int amd_xgbe_phy_read_status(struct phy_device *phydev)
+{
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret, mode, ad_ret, lp_ret;
+
+ ret = amd_xgbe_phy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (mode < 0)
+ return mode;
+ mode &= MDIO_PCS_CTRL2_TYPE;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+ if (!amd_xgbe_phy_aneg_done(phydev))
+ return 0;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_ret < 0)
+ return ad_ret;
+ lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_ret < 0)
+ return lp_ret;
+
+ ad_ret &= lp_ret;
+ phydev->pause = (ad_ret & 0x400) ? 1 : 0;
+ phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_ADVERTISE + 1);
+ if (ad_ret < 0)
+ return ad_ret;
+ lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_ret < 0)
+ return lp_ret;
+
+ ad_ret &= lp_ret;
+ if (ad_ret & 0x80) {
+ phydev->speed = SPEED_10000;
+ if (mode != MDIO_PCS_CTRL2_10GBR) {
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ phydev->speed = SPEED_1000;
+ if (mode == MDIO_PCS_CTRL2_10GBR) {
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+ } else {
+ phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000
+ : SPEED_1000;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ }
+
+ return 0;
+}
+
+static int amd_xgbe_phy_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ goto unlock;
+
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ goto unlock;
+
+ ret &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_probe(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv;
+ struct platform_device *pdev;
+ struct device *dev;
+ char *wq_name;
+ int ret;
+
+ if (!phydev->dev.of_node)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(phydev->dev.of_node);
+ if (!pdev)
+ return -EINVAL;
+ dev = &pdev->dev;
+
+ wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
+ if (!wq_name) {
+ ret = -ENOMEM;
+ goto err_pdev;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_name;
+ }
+
+ priv->pdev = pdev;
+ priv->dev = dev;
+ priv->phydev = phydev;
+
+	/* Get the device MMIO areas */
+ priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
+ if (IS_ERR(priv->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(priv->rxtx_regs);
+ goto err_priv;
+ }
+
+ priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
+ if (IS_ERR(priv->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(priv->sir0_regs);
+ goto err_rxtx;
+ }
+
+ priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
+ if (IS_ERR(priv->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(priv->sir1_regs);
+ goto err_sir0;
+ }
+
+ priv->link = 1;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ goto err_sir1;
+ if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+ priv->mode = AMD_XGBE_MODE_KR;
+ else
+ priv->mode = AMD_XGBE_MODE_KX;
+
+ mutex_init(&priv->an_mutex);
+ INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
+ priv->an_workqueue = create_singlethread_workqueue(wq_name);
+ if (!priv->an_workqueue) {
+ ret = -ENOMEM;
+ goto err_sir1;
+ }
+
+ phydev->priv = priv;
+
+ kfree(wq_name);
+ of_dev_put(pdev);
+
+ return 0;
+
+err_sir1:
+ devm_iounmap(dev, priv->sir1_regs);
+ devm_release_mem_region(dev, priv->sir1_res->start,
+ resource_size(priv->sir1_res));
+
+err_sir0:
+ devm_iounmap(dev, priv->sir0_regs);
+ devm_release_mem_region(dev, priv->sir0_res->start,
+ resource_size(priv->sir0_res));
+
+err_rxtx:
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+err_priv:
+ devm_kfree(dev, priv);
+
+err_name:
+ kfree(wq_name);
+
+err_pdev:
+ of_dev_put(pdev);
+
+ return ret;
+}
+
+static void amd_xgbe_phy_remove(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ struct device *dev = priv->dev;
+
+	/* Stop any in-progress auto-negotiation */
+ mutex_lock(&priv->an_mutex);
+ priv->an_state = AMD_XGBE_AN_EXIT;
+ mutex_unlock(&priv->an_mutex);
+
+ flush_workqueue(priv->an_workqueue);
+ destroy_workqueue(priv->an_workqueue);
+
+ /* Release resources */
+ devm_iounmap(dev, priv->sir1_regs);
+ devm_release_mem_region(dev, priv->sir1_res->start,
+ resource_size(priv->sir1_res));
+
+ devm_iounmap(dev, priv->sir0_regs);
+ devm_release_mem_region(dev, priv->sir0_res->start,
+ resource_size(priv->sir0_res));
+
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+ devm_kfree(dev, priv);
+}
+
+static int amd_xgbe_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
+}
+
+static struct phy_driver amd_xgbe_phy_driver[] = {
+ {
+ .phy_id = XGBE_PHY_ID,
+ .phy_id_mask = XGBE_PHY_MASK,
+ .name = "AMD XGBE PHY",
+ .features = 0,
+ .probe = amd_xgbe_phy_probe,
+ .remove = amd_xgbe_phy_remove,
+ .soft_reset = amd_xgbe_phy_soft_reset,
+ .config_init = amd_xgbe_phy_config_init,
+ .suspend = amd_xgbe_phy_suspend,
+ .resume = amd_xgbe_phy_resume,
+ .config_aneg = amd_xgbe_phy_config_aneg,
+ .aneg_done = amd_xgbe_phy_aneg_done,
+ .read_status = amd_xgbe_phy_read_status,
+ .match_phy_device = amd_xgbe_match_phy_device,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+static int __init amd_xgbe_phy_init(void)
+{
+ return phy_drivers_register(amd_xgbe_phy_driver,
+ ARRAY_SIZE(amd_xgbe_phy_driver));
+}
+
+static void __exit amd_xgbe_phy_exit(void)
+{
+ phy_drivers_unregister(amd_xgbe_phy_driver,
+ ARRAY_SIZE(amd_xgbe_phy_driver));
+}
+
+module_init(amd_xgbe_phy_init);
+module_exit(amd_xgbe_phy_exit);
+
+static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
+ { XGBE_PHY_ID, XGBE_PHY_MASK },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 643464d5a727..fdc1b418fa6a 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -16,9 +16,13 @@
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#define AT803X_INTR_ENABLE 0x12
#define AT803X_INTR_STATUS 0x13
+#define AT803X_SMART_SPEED 0x14
+#define AT803X_LED_CONTROL 0x18
#define AT803X_WOL_ENABLE 0x01
#define AT803X_DEVICE_ADDR 0x03
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
@@ -35,10 +39,52 @@
#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8)
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8035_PHY_ID 0x004dd072
+
MODULE_DESCRIPTION("Atheros 803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
+struct at803x_priv {
+ bool phy_reset:1;
+ struct gpio_desc *gpiod_reset;
+};
+
+struct at803x_context {
+ u16 bmcr;
+ u16 advertise;
+ u16 control1000;
+ u16 int_enable;
+ u16 smart_speed;
+ u16 led_control;
+};
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+ struct at803x_context *context)
+{
+ context->bmcr = phy_read(phydev, MII_BMCR);
+ context->advertise = phy_read(phydev, MII_ADVERTISE);
+ context->control1000 = phy_read(phydev, MII_CTRL1000);
+ context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+ context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+ context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+ const struct at803x_context *context)
+{
+ phy_write(phydev, MII_BMCR, context->bmcr);
+ phy_write(phydev, MII_ADVERTISE, context->advertise);
+ phy_write(phydev, MII_CTRL1000, context->control1000);
+ phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+ phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+ phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
static int at803x_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -142,43 +188,33 @@ static int at803x_resume(struct phy_device *phydev)
return 0;
}
+static int at803x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct at803x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->gpiod_reset = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(priv->gpiod_reset))
+ priv->gpiod_reset = NULL;
+ else
+ gpiod_direction_output(priv->gpiod_reset, 1);
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
- int val;
int ret;
- u32 features;
-
- features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
- SUPPORTED_FIBRE | SUPPORTED_BNC;
-
- val = phy_read(phydev, MII_BMSR);
- if (val < 0)
- return val;
-
- if (val & BMSR_ANEGCAPABLE)
- features |= SUPPORTED_Autoneg;
- if (val & BMSR_100FULL)
- features |= SUPPORTED_100baseT_Full;
- if (val & BMSR_100HALF)
- features |= SUPPORTED_100baseT_Half;
- if (val & BMSR_10FULL)
- features |= SUPPORTED_10baseT_Full;
- if (val & BMSR_10HALF)
- features |= SUPPORTED_10baseT_Half;
-
- if (val & BMSR_ESTATEN) {
- val = phy_read(phydev, MII_ESTATUS);
- if (val < 0)
- return val;
-
- if (val & ESTATUS_1000_TFULL)
- features |= SUPPORTED_1000baseT_Full;
- if (val & ESTATUS_1000_THALF)
- features |= SUPPORTED_1000baseT_Half;
- }
- phydev->supported = features;
- phydev->advertising = features;
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
ret = phy_write(phydev, AT803X_DEBUG_ADDR,
@@ -219,58 +255,99 @@ static int at803x_config_intr(struct phy_device *phydev)
return err;
}
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /*
+ * Conduct a hardware reset for AT8030 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+	 * in the FIFO. In such cases, the FIFO enters an error mode from
+	 * which it cannot recover by software.
+ */
+ if (phydev->drv->phy_id == ATH8030_PHY_ID) {
+ if (phydev->state == PHY_NOLINK) {
+ if (priv->gpiod_reset && !priv->phy_reset) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ gpiod_set_value(priv->gpiod_reset, 0);
+ msleep(1);
+ gpiod_set_value(priv->gpiod_reset, 1);
+ msleep(1);
+
+ at803x_context_restore(phydev, &context);
+
+ dev_dbg(&phydev->dev, "%s(): phy was reset\n",
+ __func__);
+ priv->phy_reset = true;
+ }
+ } else {
+ priv->phy_reset = false;
+ }
+ }
+}
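
The save/restore bracket is the key design choice here: toggling the reset GPIO wipes the PHY's MII state, so instead of renegotiating from power-on defaults the handler snapshots exactly the registers enumerated in at803x_context_save() (BMCR, ADVERTISE, CTRL1000, interrupt enable, smart speed and LED control), pulses the reset line, and writes them back. The phy_reset flag debounces the workaround so it fires only once per link-loss episode.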
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
- .phy_id = 0x004dd072,
- .name = "Atheros 8035 ethernet",
- .phy_id_mask = 0xffffffef,
- .config_init = at803x_config_init,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .driver = {
+ .phy_id = ATH8035_PHY_ID,
+ .name = "Atheros 8035 ethernet",
+ .phy_id_mask = 0xffffffef,
+ .probe = at803x_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = {
.owner = THIS_MODULE,
},
}, {
/* ATHEROS 8030 */
- .phy_id = 0x004dd076,
- .name = "Atheros 8030 ethernet",
- .phy_id_mask = 0xffffffef,
- .config_init = at803x_config_init,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .driver = {
+ .phy_id = ATH8030_PHY_ID,
+ .name = "Atheros 8030 ethernet",
+ .phy_id_mask = 0xffffffef,
+ .probe = at803x_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = {
.owner = THIS_MODULE,
},
}, {
/* ATHEROS 8031 */
- .phy_id = 0x004dd074,
- .name = "Atheros 8031 ethernet",
- .phy_id_mask = 0xffffffef,
- .config_init = at803x_config_init,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .ack_interrupt = &at803x_ack_interrupt,
- .config_intr = &at803x_config_intr,
- .driver = {
+ .phy_id = ATH8031_PHY_ID,
+ .name = "Atheros 8031 ethernet",
+ .phy_id_mask = 0xffffffef,
+ .probe = at803x_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
+ .driver = {
.owner = THIS_MODULE,
},
} };
@@ -283,17 +360,16 @@ static int __init atheros_init(void)
static void __exit atheros_exit(void)
{
- return phy_drivers_unregister(at803x_driver,
- ARRAY_SIZE(at803x_driver));
+ phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
}
module_init(atheros_init);
module_exit(atheros_exit);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { 0x004dd076, 0xffffffef },
- { 0x004dd074, 0xffffffef },
- { 0x004dd072, 0xffffffef },
+ { ATH8030_PHY_ID, 0xffffffef },
+ { ATH8031_PHY_ID, 0xffffffef },
+ { ATH8035_PHY_ID, 0xffffffef },
{ }
};
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ba55adfc7aae..d60d875cb445 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -21,6 +21,7 @@
#include <linux/phy_fixed.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define MII_REGS_NUM 29
@@ -31,7 +32,7 @@ struct fixed_mdio_bus {
};
struct fixed_phy {
- int id;
+ int addr;
u16 regs[MII_REGS_NUM];
struct phy_device *phydev;
struct fixed_phy_status status;
@@ -104,8 +105,8 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
if (fp->status.asym_pause)
lpa |= LPA_PAUSE_ASYM;
- fp->regs[MII_PHYSID1] = fp->id >> 16;
- fp->regs[MII_PHYSID2] = fp->id;
+ fp->regs[MII_PHYSID1] = 0;
+ fp->regs[MII_PHYSID2] = 0;
fp->regs[MII_BMSR] = bmsr;
fp->regs[MII_BMCR] = bmcr;
@@ -115,7 +116,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
return 0;
}
-static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
+static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
{
struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
@@ -124,7 +125,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
return -1;
list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->id == phy_id) {
+ if (fp->addr == phy_addr) {
/* Issue callback if user registered it. */
if (fp->link_update) {
fp->link_update(fp->phydev->attached_dev,
@@ -138,7 +139,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
return 0xFFFF;
}
-static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num,
+static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
u16 val)
{
return 0;
@@ -160,7 +161,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
return -EINVAL;
list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->id == phydev->phy_id) {
+ if (fp->addr == phydev->addr) {
fp->link_update = link_update;
fp->phydev = phydev;
return 0;
@@ -171,7 +172,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-int fixed_phy_add(unsigned int irq, int phy_id,
+int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_phy_status *status)
{
int ret;
@@ -184,9 +185,9 @@ int fixed_phy_add(unsigned int irq, int phy_id,
memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
- fmb->irqs[phy_id] = irq;
+ fmb->irqs[phy_addr] = irq;
- fp->id = phy_id;
+ fp->addr = phy_addr;
fp->status = *status;
ret = fixed_phy_update_regs(fp);
@@ -203,6 +204,66 @@ err_regs:
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
+void fixed_phy_del(int phy_addr)
+{
+ struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct fixed_phy *fp, *tmp;
+
+ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+ if (fp->addr == phy_addr) {
+ list_del(&fp->node);
+ kfree(fp);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(fixed_phy_del);
+
+static int phy_fixed_addr;
+static DEFINE_SPINLOCK(phy_fixed_addr_lock);
+
+int fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np)
+{
+ struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct phy_device *phy;
+ int phy_addr;
+ int ret;
+
+ /* Get the next available PHY address, up to PHY_MAX_ADDR */
+ spin_lock(&phy_fixed_addr_lock);
+ if (phy_fixed_addr == PHY_MAX_ADDR) {
+ spin_unlock(&phy_fixed_addr_lock);
+ return -ENOSPC;
+ }
+ phy_addr = phy_fixed_addr++;
+ spin_unlock(&phy_fixed_addr_lock);
+
+ ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+ if (ret < 0)
+ return ret;
+
+ phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+ if (!phy || IS_ERR(phy)) {
+ fixed_phy_del(phy_addr);
+ return -EINVAL;
+ }
+
+ of_node_get(np);
+ phy->dev.of_node = np;
+
+ ret = phy_device_register(phy);
+ if (ret) {
+ phy_device_free(phy);
+ of_node_put(np);
+ fixed_phy_del(phy_addr);
+ return ret;
+ }
+
+ return 0;
+}
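
A minimal sketch of a caller, assuming a MAC driver that found a fixed-link child node np; the function name and status values are illustrative, not taken from an in-tree user:

	static int example_register_fixed_link(struct device_node *np)
	{
		struct fixed_phy_status status = {
			.link = 1,
			.speed = 1000,
			.duplex = 1,	/* DUPLEX_FULL */
		};

		/* grabs the next free address on the fixed MDIO bus and
		 * registers a phy_device whose of_node points at np
		 */
		return fixed_phy_register(PHY_POLL, &status, np);
	}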
+
static int __init fixed_mdio_bus_init(void)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 76f54b32a120..2e58aa54484c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
}
EXPORT_SYMBOL(mdiobus_alloc_size);
+static void _devm_mdiobus_free(struct device *dev, void *res)
+{
+ mdiobus_free(*(struct mii_bus **)res);
+}
+
+static int devm_mdiobus_match(struct device *dev, void *res, void *data)
+{
+ struct mii_bus **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+/**
+ * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
+ * @dev: Device to allocate mii_bus for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed mdiobus_alloc_size. A mii_bus allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an mii_bus allocated with this function needs to be freed separately,
+ * devm_mdiobus_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated mii_bus on success, NULL on failure.
+ */
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
+{
+ struct mii_bus **ptr, *bus;
+
+ ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ /* use raw alloc_dr for kmalloc caller tracing */
+ bus = mdiobus_alloc_size(sizeof_priv);
+ if (bus) {
+ *ptr = bus;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
+
+/**
+ * devm_mdiobus_free - Resource-managed mdiobus_free()
+ * @dev: Device this mii_bus belongs to
+ * @bus: the mii_bus associated with the device
+ *
+ * Free mii_bus allocated with devm_mdiobus_alloc_size().
+ */
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
+{
+ int rc;
+
+ rc = devres_release(dev, _devm_mdiobus_free,
+ devm_mdiobus_match, bus);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_free);
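
A minimal sketch of a consumer, assuming a hypothetical platform driver (none of the names below are from the tree); the bus read/write ops and the rest of the probe plumbing are omitted for brevity:

	struct example_mdio_priv {
		void __iomem *regs;
	};

	static int example_mdio_probe(struct platform_device *pdev)
	{
		struct mii_bus *bus;

		/* freed automatically when the driver detaches */
		bus = devm_mdiobus_alloc_size(&pdev->dev,
					      sizeof(struct example_mdio_priv));
		if (!bus)
			return -ENOMEM;

		bus->name = "example-mdio";
		bus->parent = &pdev->dev;
		snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

		return mdiobus_register(bus);
	}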
+
/**
* mdiobus_release - mii_bus device release callback
* @d: the target struct device that contains the mii_bus
@@ -233,6 +300,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
if (IS_ERR(phydev) || phydev == NULL)
return phydev;
+ /*
* For DT, see if the auto-probed phy has a corresponding child
+ * in the bus node, and set the of_node pointer in this case.
+ */
+ of_mdiobus_link_phydev(bus, phydev);
+
err = phy_device_register(phydev);
if (err) {
phy_device_free(phydev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d849684231c1..bc7c7d2f75f2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
return 0;
}
+#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
+#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
+#define OP_DATA 1
+#define KSZ9031_PS_TO_REG 60
+
+/* Extended registers */
+#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_CLK_PAD_SKEW 8
+
+static int ksz9031_extended_write(struct phy_device *phydev,
+ u8 mode, u32 dev_addr, u32 regnum, u16 val)
+{
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+ phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+ return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
+}
+
+static int ksz9031_extended_read(struct phy_device *phydev,
+ u8 mode, u32 dev_addr, u32 regnum)
+{
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+ phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+ return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
+}
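
The two helpers above implement the KSZ9031's indirect MMD access sequence: write the device address to the MMD control register, latch the target register number through the data register, flip the control register into data mode ((OP_DATA << 14) ORed with the same device address), and only then move the payload through the data register.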
+
+static int ksz9031_of_load_skew_values(struct phy_device *phydev,
+ struct device_node *of_node,
+ u16 reg, size_t field_sz,
+ char *field[], u8 numfields)
+{
+	int val[4] = {-1, -2, -3, -4};	/* sentinels: -(i + 1) means "not set" */
+ int matches = 0;
+ u16 mask;
+ u16 maxval;
+ u16 newval;
+ int i;
+
+ for (i = 0; i < numfields; i++)
+ if (!of_property_read_u32(of_node, field[i], val + i))
+ matches++;
+
+ if (!matches)
+ return 0;
+
+ if (matches < numfields)
+ newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ else
+ newval = 0;
+
+ maxval = (field_sz == 4) ? 0xf : 0x1f;
+ for (i = 0; i < numfields; i++)
+ if (val[i] != -(i + 1)) {
+ mask = 0xffff;
+ mask ^= maxval << (field_sz * i);
+ newval = (newval & mask) |
+ (((val[i] / KSZ9031_PS_TO_REG) & maxval)
+ << (field_sz * i));
+ }
+
+ return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
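
A worked example of the packing above (property value illustrative): txd1-skew-ps = <420> lands in a 4-bit field, so maxval is 0xf, the field value is 420 / KSZ9031_PS_TO_REG = 7, and field index 1 shifts it into bits 7:4 of the TX data pad skew register. Fields whose properties are absent keep the value read back from the PHY, which is why a partial match triggers the extended read first.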
+
+static int ksz9031_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+ char *rx_data_skews[4] = {
+ "rxd0-skew-ps", "rxd1-skew-ps",
+ "rxd2-skew-ps", "rxd3-skew-ps"
+ };
+ char *tx_data_skews[4] = {
+ "txd0-skew-ps", "txd1-skew-ps",
+ "txd2-skew-ps", "txd3-skew-ps"
+ };
+ char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_node) {
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+ clk_skews, 2);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+ control_skews, 2);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+ rx_data_skews, 4);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+ tx_data_skews, 4);
+ }
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3bc079a67a3d..f7c61812ea4a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -720,6 +720,9 @@ void phy_state_machine(struct work_struct *work)
mutex_lock(&phydev->lock);
+ if (phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+
switch (phydev->state) {
case PHY_DOWN:
case PHY_STARTING:
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4987a1c6dc52..35d753d22f78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -33,6 +33,7 @@
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/irq.h>
@@ -1067,14 +1068,11 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
-static int genphy_config_init(struct phy_device *phydev)
+int genphy_config_init(struct phy_device *phydev)
{
int val;
u32 features;
- /* For now, I'll claim that the generic driver supports
- * all possible port types
- */
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC);
@@ -1107,8 +1105,8 @@ static int genphy_config_init(struct phy_device *phydev)
features |= SUPPORTED_1000baseT_Half;
}
- phydev->supported = features;
- phydev->advertising = features;
+ phydev->supported &= features;
+ phydev->advertising &= features;
return 0;
}
@@ -1118,6 +1116,7 @@ static int gen10g_soft_reset(struct phy_device *phydev)
/* Do nothing for now */
return 0;
}
+EXPORT_SYMBOL(genphy_config_init);
static int gen10g_config_init(struct phy_device *phydev)
{
@@ -1168,6 +1167,38 @@ static int gen10g_resume(struct phy_device *phydev)
return 0;
}
+static void of_set_phy_supported(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->dev.of_node;
+ u32 max_speed;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return;
+
+ if (!node)
+ return;
+
+ if (!of_property_read_u32(node, "max-speed", &max_speed)) {
+		/* The default values for phydev->supported are provided by the
+		 * PHY driver "features" member; we want to reset to sane
+		 * defaults first before supporting higher speeds.
+		 */
+ phydev->supported &= PHY_DEFAULT_FEATURES;
+
+ switch (max_speed) {
+ default:
+ return;
+
+ case SPEED_1000:
+ phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_100:
+ phydev->supported |= PHY_100BT_FEATURES;
+ case SPEED_10:
+ phydev->supported |= PHY_10BT_FEATURES;
+ }
+ }
+}
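
The missing break statements in the switch above are deliberate: the cases cascade so that max-speed = 1000 turns on the 1000, 100 and 10 Mb/s feature sets cumulatively and 100 turns on 100 and 10, while an unrecognized value returns early with supported still clamped to PHY_DEFAULT_FEATURES.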
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -1202,7 +1233,8 @@ static int phy_probe(struct device *dev)
* or both of these values
*/
phydev->supported = phydrv->features;
- phydev->advertising = phydrv->features;
+ of_set_phy_supported(phydev);
+ phydev->advertising = phydev->supported;
/* Set the state to READY by default */
phydev->state = PHY_READY;
@@ -1295,7 +1327,9 @@ static struct phy_driver genphy_driver[] = {
.name = "Generic PHY",
.soft_reset = genphy_soft_reset,
.config_init = genphy_config_init,
- .features = 0,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_MII |
+ SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_BNC,
.config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
.read_status = genphy_read_status,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fa1d69a38ccf..45483fdfbe06 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,65 +64,51 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
return err;
}
-/* RTL8201CP */
-static struct phy_driver rtl8201cp_driver = {
- .phy_id = 0x00008201,
- .name = "RTL8201CP Ethernet",
- .phy_id_mask = 0x0000ffff,
- .features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* RTL8211B */
-static struct phy_driver rtl8211b_driver = {
- .phy_id = 0x001cc912,
- .name = "RTL8211B Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &rtl821x_ack_interrupt,
- .config_intr = &rtl8211b_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* RTL8211E */
-static struct phy_driver rtl8211e_driver = {
- .phy_id = 0x001cc915,
- .name = "RTL8211E Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &rtl821x_ack_interrupt,
- .config_intr = &rtl8211e_config_intr,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .driver = { .owner = THIS_MODULE,},
+static struct phy_driver realtek_drvs[] = {
+ {
+ .phy_id = 0x00008201,
+ .name = "RTL8201CP Ethernet",
+ .phy_id_mask = 0x0000ffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+ }, {
+ .phy_id = 0x001cc912,
+ .name = "RTL8211B Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl8211b_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+ }, {
+ .phy_id = 0x001cc915,
+ .name = "RTL8211E Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl8211e_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+ },
};
static int __init realtek_init(void)
{
- int ret;
-
- ret = phy_driver_register(&rtl8201cp_driver);
- if (ret < 0)
- return -ENODEV;
- ret = phy_driver_register(&rtl8211b_driver);
- if (ret < 0)
- return -ENODEV;
- return phy_driver_register(&rtl8211e_driver);
+ return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs));
}
static void __exit realtek_exit(void)
{
- phy_driver_unregister(&rtl8211b_driver);
- phy_driver_unregister(&rtl8211e_driver);
+ phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs));
}
module_init(realtek_init);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 11f34813e23f..180c49479c42 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
static void __exit smsc_exit(void)
{
- return phy_drivers_unregister(smsc_phy_driver,
- ARRAY_SIZE(smsc_phy_driver));
+ phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
}
MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 14372c65a7e8..5dc0935da99c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
static void __exit vsc82xx_exit(void)
{
- return phy_drivers_unregister(vsc82xx_driver,
- ARRAY_SIZE(vsc82xx_driver));
+ phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
}
module_init(vsc82xx_init);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e3923ebb693f..91d6c1272fcf 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
- struct sock_fprog fprog = {
+ struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
@@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
- struct sock_fprog fprog = {
+ struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 01805319e1e0..1aff970be33e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
nf_reset(skb);
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(skb, &rt->dst, NULL);
+ ip_select_ident(skb, NULL);
ip_send_check(iph);
ip_local_out(skb);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a8497183ff8b..dac7a0d9bb46 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -494,7 +494,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
ndev->mtu = RIO_MAX_MSG_SIZE - 14;
ndev->features = NETIF_F_LLTX;
SET_NETDEV_DEV(ndev, &mport->dev);
- SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+ ndev->ethtool_ops = &rionet_ethtool_ops;
spin_lock_init(&rnet->lock);
spin_lock_init(&rnet->tx_lock);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index ad4a94e9ff57..87526443841f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -83,6 +83,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "slip.h"
#ifdef CONFIG_INET
#include <linux/ip.h>
@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
#endif
}
-/*
- * Called by the driver when there's room for more data. If we have
- * more packets to send, we send them here.
- */
-static void slip_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slip_transmit(struct work_struct *work)
{
+ struct slip *sl = container_of(work, struct slip, tx_work);
int actual;
- struct slip *sl = tty->disc_data;
+ spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
- if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
+ if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
+ spin_unlock_bh(&sl->lock);
return;
+ }
- spin_lock_bh(&sl->lock);
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
spin_unlock_bh(&sl->lock);
sl_unlock(sl);
return;
}
- actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
spin_unlock_bh(&sl->lock);
}
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slip_write_wakeup(struct tty_struct *tty)
+{
+ struct slip *sl = tty->disc_data;
+
+ schedule_work(&sl->tx_work);
+}
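
The indirection above is the point of the change: tty drivers may call the write-wakeup hook from hard interrupt context, where taking the _bh-flavoured sl->lock would be unsafe, so slip_write_wakeup() now merely schedules tx_work and the actual tty write runs later in process context. slip_transmit() re-checks sl->tty under the lock because slip_close() clears it (also under the lock) and then flush_work()s any transmit still in flight.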
+
static void sl_tx_timeout(struct net_device *dev)
{
struct slip *sl = netdev_priv(dev);
@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
sl->magic = SLIP_MAGIC;
sl->dev = dev;
spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slip_transmit);
sl->mode = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
/* initialize timer_list struct */
@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
return;
+ spin_lock_bh(&sl->lock);
tty->disc_data = NULL;
sl->tty = NULL;
+ spin_unlock_bh(&sl->lock);
+
+ flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
index 67673cf1266b..cf32aadf508f 100644
--- a/drivers/net/slip/slip.h
+++ b/drivers/net/slip/slip.h
@@ -53,6 +53,7 @@ struct slip {
struct tty_struct *tty; /* ptr to TTY structure */
struct net_device *dev; /* easy for intr handling */
spinlock_t lock;
+ struct work_struct tx_work; /* Flushes transmit buffer */
#ifdef SL_INCLUDE_CSLIP
struct slcompress *slcomp; /* for header compression */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ce4989be86d9..b4958c7ffa84 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,
static void __team_compute_features(struct team *team)
{
struct team_port *port;
- u32 vlan_features = TEAM_VLAN_FEATURES;
+ u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index dbde3412ee5e..a58dfebb5512 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -49,7 +49,7 @@ struct lb_port_mapping {
struct lb_priv_ex {
struct team *team;
struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
- struct sock_fprog *orig_fprog;
+ struct sock_fprog_kern *orig_fprog;
struct {
unsigned int refresh_interval; /* in tenths of second */
struct delayed_work refresh_dw;
@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
return 0;
}
-static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
const void *data)
{
- struct sock_fprog *fprog;
+ struct sock_fprog_kern *fprog;
struct sock_filter *filter = (struct sock_filter *) data;
if (data_len % sizeof(struct sock_filter))
return -EINVAL;
- fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+ fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
if (!fprog)
return -ENOMEM;
fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
return 0;
}
-static void __fprog_destroy(struct sock_fprog *fprog)
+static void __fprog_destroy(struct sock_fprog_kern *fprog)
{
kfree(fprog->filter);
kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
struct lb_priv *lb_priv = get_lb_priv(team);
struct sk_filter *fp = NULL;
struct sk_filter *orig_fp;
- struct sock_fprog *fprog = NULL;
+ struct sock_fprog_kern *fprog = NULL;
int err;
if (ctx->data.bin_val.len) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ee328ba101e7..98bad1fb1bfb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
- wake_up_all(&tfile->wq.wait);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
- wake_up_all(&tfile->wq.wait);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
- POLLRDNORM | POLLRDBAND);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
- poll_wait(file, &tfile->wq.wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
const struct iovec *iv, ssize_t len, int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
ssize_t ret = 0;
+ int peeked, err, off = 0;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
- if (unlikely(!noblock))
- add_wait_queue(&tfile->wq.wait, &wait);
- while (len) {
- if (unlikely(!noblock))
- current->state = TASK_INTERRUPTIBLE;
+ if (!len)
+ return ret;
- /* Read frames from the queue */
- if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
- if (noblock) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- if (tun->dev->reg_state != NETREG_REGISTERED) {
- ret = -EIO;
- break;
- }
-
- /* Nothing to read, let's sleep */
- schedule();
- continue;
- }
+ if (tun->dev->reg_state != NETREG_REGISTERED)
+ return -EIO;
+ /* Read frames from queue */
+ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+ &peeked, &off, &err);
+ if (skb) {
ret = tun_put_user(tun, tfile, skb, iv, len);
kfree_skb(skb);
- break;
- }
-
- if (unlikely(!noblock)) {
- current->state = TASK_RUNNING;
- remove_wait_queue(&tfile->wq.wait, &wait);
- }
+ } else
+ ret = err;
return ret;
}
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->flags = 0;
tfile->ifindex = 0;
- rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
init_waitqueue_head(&tfile->wq.wait);
+ RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 630caf48f63a..8cfc3bb0c6a6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -793,7 +793,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
netdev->netdev_ops = &catc_netdev_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
catc->usbdev = usbdev;
catc->netdev = netdev;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 2e025ddcef21..5ee7a1dbc023 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -24,13 +24,21 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
+/* alternative VLAN for IP session 0 if not untagged */
+#define MBIM_IPS0_VID 4094
+
/* driver specific data - must match cdc_ncm usage */
struct cdc_mbim_state {
struct cdc_ncm_ctx *ctx;
atomic_t pmcount;
struct usb_driver *subdriver;
- struct usb_interface *control;
- struct usb_interface *data;
+ unsigned long _unused;
+ unsigned long flags;
+};
+
+/* flags for the cdc_mbim_state.flags field */
+enum cdc_mbim_flags {
+ FLAG_IPS0_VLAN = 1 << 0, /* IP session 0 is tagged */
};
/* using a counter to merge subdriver requests with our own into a combined state */
@@ -62,16 +70,91 @@ static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
return cdc_mbim_manage_power(dev, status);
}
+static int cdc_mbim_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+
+ /* creation of this VLAN is a request to tag IP session 0 */
+ if (vid == MBIM_IPS0_VID)
+ info->flags |= FLAG_IPS0_VLAN;
+	else if (vid >= 512)	/* we don't map these to an MBIM session */
+		return -EINVAL;
+ return 0;
+}
+
+static int cdc_mbim_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+
+ /* this is a request for an untagged IP session 0 */
+ if (vid == MBIM_IPS0_VID)
+ info->flags &= ~FLAG_IPS0_VLAN;
+ return 0;
+}
+
+static const struct net_device_ops cdc_mbim_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cdc_mbim_rx_kill_vid,
+};
+
+/* Change the control interface altsetting and, if the entry that matches
+ * after the class code change points to a different struct, update the
+ * .driver_info pointer accordingly.
+ */
+static int cdc_mbim_set_ctrlalt(struct usbnet *dev, struct usb_interface *intf, u8 alt)
+{
+ struct usb_driver *driver = to_usb_driver(intf->dev.driver);
+ const struct usb_device_id *id;
+ struct driver_info *info;
+ int ret;
+
+ ret = usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ alt);
+ if (ret)
+ return ret;
+
+ id = usb_match_id(intf, driver->id_table);
+ if (!id)
+ return -ENODEV;
+
+ info = (struct driver_info *)id->driver_info;
+ if (info != dev->driver_info) {
+ dev_dbg(&intf->dev, "driver_info updated to '%s'\n",
+ info->description);
+ dev->driver_info = info;
+ }
+ return 0;
+}
static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
int ret = -ENODEV;
- u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
+ u8 data_altsetting = 1;
struct cdc_mbim_state *info = (void *)&dev->data;
- /* Probably NCM, defer for cdc_ncm_bind */
+ /* should we change the control altsetting on an NCM/MBIM function? */
+ if (cdc_ncm_select_altsetting(intf) == CDC_NCM_COMM_ALTSETTING_MBIM) {
+ data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+ ret = cdc_mbim_set_ctrlalt(dev, intf, CDC_NCM_COMM_ALTSETTING_MBIM);
+ if (ret)
+ goto err;
+ ret = -ENODEV;
+ }
+
+ /* we will hit this for NCM/MBIM functions if prefer_mbim is false */
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
@@ -101,7 +184,10 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_NOARP;
/* no need to put the VLAN tci in the packet headers */
- dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ /* monitor VLAN additions and removals */
+ dev->net->netdev_ops = &cdc_mbim_netdev_ops;
err:
return ret;
}
@@ -164,12 +250,24 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
skb_pull(skb, ETH_HLEN);
}
+ /* Is IP session <0> tagged too? */
+ if (info->flags & FLAG_IPS0_VLAN) {
+ /* drop all untagged packets */
+ if (!tci)
+ goto error;
+ /* map MBIM_IPS0_VID to IPS<0> */
+ if (tci == MBIM_IPS0_VID)
+ tci = 0;
+ }
+
/* mapping VLANs to MBIM sessions:
- * no tag => IPS session <0>
+ * no tag => IPS session <0> if !FLAG_IPS0_VLAN
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
- * 512 - 4095 => unsupported, drop
+ * 512 - 4093 => unsupported, drop
+ * 4094 => IPS session <0> if FLAG_IPS0_VLAN
*/
+
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
if (!is_ip)
@@ -178,6 +276,8 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
c[3] = tci;
break;
case 0x0100: /* VLAN ID 256 - 511 */
+ if (is_ip)
+ goto error;
sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
c = (u8 *)&sign;
c[3] = tci;
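Pulled out of its diff context, the tag-to-session mapping implemented by this switch can be sketched as a standalone helper (tci_to_sign and ips0_tagged are invented names; the byte-3 store mirrors the driver's c[3] = tci truncation, which also turns tags 256..511 into DSS session ids 0..255):

static int tci_to_sign(u16 tci, bool ips0_tagged, bool is_ip, __le32 *sign)
{
        if (ips0_tagged) {
                if (!tci)
                        return -EINVAL;         /* untagged frames dropped */
                if (tci == MBIM_IPS0_VID)
                        tci = 0;                /* 4094 stands in for IPS<0> */
        }

        if (tci < 256) {                        /* IPS session <tci> */
                if (!is_ip)
                        return -EINVAL;
                *sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        } else if (tci < 512) {                 /* DSS session <tci - 256> */
                if (is_ip)
                        return -EINVAL;
                *sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
        } else {
                return -EINVAL;                 /* 512..4093: unsupported */
        }
        ((u8 *)sign)[3] = (u8)tci;              /* low byte = session id */
        return 0;
}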
@@ -223,8 +323,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
/* need to send the NA on the VLAN dev, if any */
rcu_read_lock();
if (tci) {
- netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
- tci);
+ netdev = __vlan_find_dev_deep_rcu(dev->net, htons(ETH_P_8021Q),
+ tci);
if (!netdev) {
rcu_read_unlock();
return;
@@ -268,7 +368,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
__be16 proto = htons(ETH_P_802_3);
struct sk_buff *skb = NULL;
- if (tci < 256) { /* IPS session? */
+ if (tci < 256 || tci == MBIM_IPS0_VID) { /* IPS session? */
if (len < sizeof(struct iphdr))
goto err;
@@ -320,6 +420,7 @@ static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
struct usb_cdc_ncm_dpe16 *dpe16;
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
+ u32 payload = 0;
u8 *c;
u16 tci;
@@ -338,6 +439,9 @@ next_ndp:
case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
c = (u8 *)&ndp16->dwSignature;
tci = c[3];
+ /* tag IPS<0> packets too if MBIM_IPS0_VID exists */
+ if (!tci && info->flags & FLAG_IPS0_VLAN)
+ tci = MBIM_IPS0_VID;
break;
case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
c = (u8 *)&ndp16->dwSignature;
@@ -379,6 +483,7 @@ next_ndp:
if (!skb)
goto error;
usbnet_skb_return(dev, skb);
+ payload += len; /* count payload bytes in this NTB */
}
}
err_ndp:
@@ -387,6 +492,10 @@ err_ndp:
if (ndpoffset && loopcount--)
goto next_ndp;
+ /* update stats */
+ ctx->rx_overhead += skb_in->len - payload;
+ ctx->rx_ntbs++;
+
return 1;
error:
return 0;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9a2bd11943eb..80a844e0ae03 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -65,19 +65,384 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
-static int cdc_ncm_setup(struct usbnet *dev)
+struct cdc_ncm_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define CDC_NCM_STAT(str, m) { \
+ .stat_string = str, \
+ .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
+ .stat_offset = offsetof(struct cdc_ncm_ctx, m) }
+#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
+
+static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
+ CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
+ CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
+ CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
+ CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
+ CDC_NCM_SIMPLE_STAT(tx_overhead),
+ CDC_NCM_SIMPLE_STAT(tx_ntbs),
+ CDC_NCM_SIMPLE_STAT(rx_overhead),
+ CDC_NCM_SIMPLE_STAT(rx_ntbs),
+};
+
+static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(cdc_ncm_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ int i;
+ char *p = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+ p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
+ data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+ memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
+
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .get_link = usbnet_get_link,
+ .nway_reset = usbnet_nway_reset,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_sset_count = cdc_ncm_get_sset_count,
+ .get_strings = cdc_ncm_get_strings,
+ .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+};
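The table above is a standard kernel ethtool-stats pattern: describe each counter once by name, width and offset, then walk the table in get_ethtool_stats() and widen everything to u64. Reduced to a self-contained sketch (struct ctx and fill() are illustrative names):

struct ctx { u64 tx_ntbs; u32 rx_ntbs; };

struct stat_desc {
        char name[ETH_GSTRING_LEN];
        int size;
        int offset;
};

#define SIMPLE_STAT(m) { __stringify(m), \
        sizeof(((struct ctx *)0)->m), offsetof(struct ctx, m) }

static const struct stat_desc stats[] = {
        SIMPLE_STAT(tx_ntbs),
        SIMPLE_STAT(rx_ntbs),
};

static void fill(const struct ctx *c, u64 *data)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(stats); i++) {
                const char *p = (const char *)c + stats[i].offset;

                data[i] = (stats[i].size == sizeof(u64)) ?
                          *(const u64 *)p : *(const u32 *)p;
        }
}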
+
+static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 val, max, min;
+
+ /* clamp new_rx to sane values */
+ min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+
+ /* dwNtbInMaxSize spec violation? Use MIN size for both limits */
+ if (max < min) {
+ dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
+ le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
+ max = min;
+ }
+
+ val = clamp_t(u32, new_rx, min, max);
+ if (val != new_rx)
+ dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range\n", min, max);
+
+ return val;
+}
+
+static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 val, max, min;
+
+ /* clamp new_tx to sane values */
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+
+ /* some devices set dwNtbOutMaxSize too low for the above default */
+ min = min(min, max);
+
+ val = clamp_t(u32, new_tx, min, max);
+ if (val != new_tx)
+ dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range\n", min, max);
+
+ return val;
+}
+
+static ssize_t cdc_ncm_show_min_tx_pkt(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->min_tx_pkt);
+}
+
+static ssize_t cdc_ncm_show_rx_max(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->rx_max);
+}
+
+static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->tx_max);
+}
+
+static ssize_t cdc_ncm_show_tx_timer_usecs(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC);
+}
+
+static ssize_t cdc_ncm_store_min_tx_pkt(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ unsigned long val;
+
+ /* no need to restrict values - anything from 0 to infinity is OK */
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ ctx->min_tx_pkt = val;
+ return len;
+}
+
+static ssize_t cdc_ncm_store_rx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || cdc_ncm_check_rx_max(dev, val) != val)
+ return -EINVAL;
+
+ cdc_ncm_update_rxtx_max(dev, val, ctx->tx_max);
+ return len;
+}
+
+static ssize_t cdc_ncm_store_tx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || cdc_ncm_check_tx_max(dev, val) != val)
+ return -EINVAL;
+
+ cdc_ncm_update_rxtx_max(dev, ctx->rx_max, val);
+ return len;
+}
+
+static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ ssize_t ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val && (val < CDC_NCM_TIMER_INTERVAL_MIN || val > CDC_NCM_TIMER_INTERVAL_MAX))
+ return -EINVAL;
+
+ spin_lock_bh(&ctx->mtx);
+ ctx->timer_interval = val * NSEC_PER_USEC;
+ if (!ctx->timer_interval)
+ ctx->tx_timer_pending = 0;
+ spin_unlock_bh(&ctx->mtx);
+ return len;
+}
+
+static DEVICE_ATTR(min_tx_pkt, S_IRUGO | S_IWUSR, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
+static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
+static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
+static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
+
+#define NCM_PARM_ATTR(name, format, tocpu) \
+static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *attr, char *buf) \
+{ \
+ struct usbnet *dev = netdev_priv(to_net_dev(d)); \
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
+ return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, cdc_ncm_show_##name, NULL)
+
+NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu);
+NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu);
+NCM_PARM_ATTR(wNdpInDivisor, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpInPayloadRemainder, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpInAlignment, "%u", le16_to_cpu);
+NCM_PARM_ATTR(dwNtbOutMaxSize, "%u", le32_to_cpu);
+NCM_PARM_ATTR(wNdpOutDivisor, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpOutPayloadRemainder, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpOutAlignment, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNtbOutMaxDatagrams, "%u", le16_to_cpu);
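For reference, here is one expansion of the NCM_PARM_ATTR() macro written out by hand (a sketch; the preprocessor output is equivalent up to whitespace). Together with the attribute group registered later in this patch, these appear read-only under /sys/class/net/<iface>/cdc_ncm/:

static ssize_t cdc_ncm_show_dwNtbInMaxSize(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct usbnet *dev = netdev_priv(to_net_dev(d));
        struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];

        return sprintf(buf, "%u\n", le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
}
static DEVICE_ATTR(dwNtbInMaxSize, S_IRUGO, cdc_ncm_show_dwNtbInMaxSize, NULL);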
+
+static struct attribute *cdc_ncm_sysfs_attrs[] = {
+ &dev_attr_min_tx_pkt.attr,
+ &dev_attr_rx_max.attr,
+ &dev_attr_tx_max.attr,
+ &dev_attr_tx_timer_usecs.attr,
+ &dev_attr_bmNtbFormatsSupported.attr,
+ &dev_attr_dwNtbInMaxSize.attr,
+ &dev_attr_wNdpInDivisor.attr,
+ &dev_attr_wNdpInPayloadRemainder.attr,
+ &dev_attr_wNdpInAlignment.attr,
+ &dev_attr_dwNtbOutMaxSize.attr,
+ &dev_attr_wNdpOutDivisor.attr,
+ &dev_attr_wNdpOutPayloadRemainder.attr,
+ &dev_attr_wNdpOutAlignment.attr,
+ &dev_attr_wNtbOutMaxDatagrams.attr,
+ NULL,
+};
+
+static struct attribute_group cdc_ncm_sysfs_attr_group = {
+ .name = "cdc_ncm",
+ .attrs = cdc_ncm_sysfs_attrs,
+};
+
+/* handle rx_max and tx_max changes */
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
u32 val;
- u8 flags;
- u8 iface_no;
- int err;
- int eth_hlen;
- u16 mbim_mtu;
- u16 ntb_fmt_supported;
- __le16 max_datagram_size;
- iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+ val = cdc_ncm_check_rx_max(dev, new_rx);
+
+ /* inform device about NTB input size changes */
+ if (val != ctx->rx_max) {
+ __le32 dwNtbInMaxSize = cpu_to_le32(val);
+
+ dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
+
+ /* tell device to use new size */
+ if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &dwNtbInMaxSize, 4) < 0)
+ dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+ else
+ ctx->rx_max = val;
+ }
+
+ /* usbnet uses these values for sizing rx queues */
+ if (dev->rx_urb_size != ctx->rx_max) {
+ dev->rx_urb_size = ctx->rx_max;
+ if (netif_running(dev->net))
+ usbnet_unlink_rx_urbs(dev);
+ }
+
+ val = cdc_ncm_check_tx_max(dev, new_tx);
+ if (val != ctx->tx_max)
+ dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
+
+ /* Adding a pad byte here if necessary simplifies the handling
+ * in cdc_ncm_fill_tx_frame, making tx_max always represent
+ * the real skb max size.
+ *
+ * We cannot use dev->maxpacket here because this is called from
+ * .bind which is called before usbnet sets up dev->maxpacket
+ */
+ if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+ val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ val++;
+
+ /* we might need to flush any pending tx buffers if running */
+ if (netif_running(dev->net) && val > ctx->tx_max) {
+ netif_tx_lock_bh(dev->net);
+ usbnet_start_xmit(NULL, dev->net);
+ /* make sure tx_curr_skb is reallocated if it was empty */
+ if (ctx->tx_curr_skb) {
+ dev_kfree_skb_any(ctx->tx_curr_skb);
+ ctx->tx_curr_skb = NULL;
+ }
+ ctx->tx_max = val;
+ netif_tx_unlock_bh(dev->net);
+ } else {
+ ctx->tx_max = val;
+ }
+
+ dev->hard_mtu = ctx->tx_max;
+
+ /* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
+ /* never pad more than 3 full USB packets per transfer */
+ ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+ CDC_NCM_MIN_TX_PKT, ctx->tx_max);
+}
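The pad-byte dance above addresses a USB detail: a bulk transfer whose length is an exact multiple of the endpoint's wMaxPacketSize must be terminated by a zero-length packet (ZLP). Nudging tx_max off the packet boundary means a full-sized NTB never needs one. As an isolated sketch (avoid_zlp is an invented name):

/* make the NTB size a non-multiple of wMaxPacketSize so no ZLP is needed */
static u32 avoid_zlp(u32 tx_max, u32 maxpacket)
{
        if (tx_max % maxpacket == 0)
                tx_max++;
        return tx_max;
}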
+
+/* helpers for NCM and MBIM differences */
+static u8 cdc_ncm_flags(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+ return ctx->mbim_desc->bmNetworkCapabilities;
+ if (ctx->func_desc)
+ return ctx->func_desc->bmNetworkCapabilities;
+ return 0;
+}
+
+static int cdc_ncm_eth_hlen(struct usbnet *dev)
+{
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+ return 0;
+ return ETH_HLEN;
+}
+
+static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
+{
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+ return CDC_MBIM_MIN_DATAGRAM_SIZE;
+ return CDC_NCM_MIN_DATAGRAM_SIZE;
+}
+
+static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+ return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ if (ctx->ether_desc)
+ return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ return CDC_NCM_MAX_DATAGRAM_SIZE;
+}
+
+/* initial one-time device setup. MUST be called with the data interface
+ * in altsetting 0
+ */
+static int cdc_ncm_init(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+ int err;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
@@ -89,7 +454,36 @@ static int cdc_ncm_setup(struct usbnet *dev)
return err; /* GET_NTB_PARAMETERS is required */
}
- /* read correct set of parameters according to device mode */
+ /* set CRC Mode */
+ if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
+ dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0);
+ if (err < 0)
+ dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
+ }
+
+ /* set NTB format, if both formats are supported.
+ *
+ * "The host shall only send this command while the NCM Data
+ * Interface is in alternate setting 0."
+ */
+ if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
+ USB_CDC_NCM_NTB32_SUPPORTED) {
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+ if (err < 0)
+ dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+ }
+
+ /* set initial device values */
ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
@@ -97,72 +491,79 @@ static int cdc_ncm_setup(struct usbnet *dev)
ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
-
- /* there are some minor differences in NCM and MBIM defaults */
- if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
- if (!ctx->mbim_desc)
- return -EINVAL;
- eth_hlen = 0;
- flags = ctx->mbim_desc->bmNetworkCapabilities;
- ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
- if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
- } else {
- if (!ctx->func_desc)
- return -EINVAL;
- eth_hlen = ETH_HLEN;
- flags = ctx->func_desc->bmNetworkCapabilities;
- ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- }
-
- /* common absolute max for NCM and MBIM */
- if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
dev_dbg(&dev->intf->dev,
"dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
- ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
/* max count of tx datagrams */
if ((ctx->tx_max_datagrams == 0) ||
(ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
- /* verify maximum size of received NTB in bytes */
- if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
- dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
- USB_CDC_NCM_NTB_MIN_IN_SIZE);
- ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
- }
+ /* set up maximum NDP size */
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
- if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
- dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_RX);
- ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
- }
+ /* initial coalescing timer interval */
+ ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
- /* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
- __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+ return 0;
+}
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0, iface_no, &dwNtbInMaxSize, 4);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+/* set a new max datagram size */
+static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+ __le16 max_datagram_size;
+ u16 mbim_mtu;
+ int err;
+
+ /* set default based on descriptors */
+ ctx->max_datagram_size = clamp_t(u32, new_size,
+ cdc_ncm_min_dgram_size(dev),
+ CDC_NCM_MAX_DATAGRAM_SIZE);
+
+ /* inform the device about the selected Max Datagram Size? */
+ if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+ goto out;
+
+ /* read current mtu value from device */
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0) {
+ dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ goto out;
}
- /* verify maximum size of transmitted NTB in bytes */
- if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
- dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_TX);
- ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+ if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+ goto out;
+
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0)
+ dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
+out:
+ /* set MTU to max supported by the device if necessary */
+ dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
+
+ /* do not exceed the operator-preferred MTU */
+ if (ctx->mbim_extended_desc) {
+ mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+ if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+ dev->net->mtu = mbim_mtu;
}
+}
+
+static void cdc_ncm_fix_modulus(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 val;
/*
* verify that the structure alignment is:
@@ -199,68 +600,31 @@ static int cdc_ncm_setup(struct usbnet *dev)
}
/* adjust TX-remainder according to NCM specification. */
- ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+ ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
(ctx->tx_modulus - 1));
+}
- /* additional configuration */
-
- /* set CRC Mode */
- if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_CRC_NOT_APPENDED,
- iface_no, NULL, 0);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
- }
-
- /* set NTB format, if both formats are supported */
- if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
- }
-
- /* inform the device about the selected Max Datagram Size */
- if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
- goto out;
-
- /* read current mtu value from device */
- err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
- dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
- goto out;
- }
-
- if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
- goto out;
+static int cdc_ncm_setup(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 def_rx, def_tx;
- max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
- err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+ /* be conservative when selecting the initial buffer size to
+ * increase the number of hosts this will work for
+ */
+ def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
+ le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+ def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
+ le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
-out:
- /* set MTU to max supported by the device if necessary */
- if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
- dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+ /* clamp rx_max and tx_max and inform device */
+ cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
- /* do not exceed operater preferred MTU */
- if (ctx->mbim_extended_desc) {
- mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
- if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
- dev->net->mtu = mbim_mtu;
- }
+ /* sanitize the modulus and remainder values */
+ cdc_ncm_fix_modulus(dev);
+ /* set max datagram size */
+ cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
return 0;
}
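With the refactoring complete, the resulting call flow is easier to state than to reassemble from the hunks (function names are those introduced by this patch):

/*
 * cdc_ncm_bind_common()
 *   cdc_ncm_init()              - one-time: GET_NTB_PARAMETERS, CRC mode
 *                                 off, 16-bit NTB format, initial values
 *   ... interface and endpoint setup ...
 *   cdc_ncm_setup()
 *     cdc_ncm_update_rxtx_max() - clamp conservative defaults, issue
 *                                 SET_NTB_INPUT_SIZE, resize queues
 *     cdc_ncm_fix_modulus()     - sanitize alignment/remainder values
 *     cdc_ncm_set_dgram_size()  - negotiate max datagram size and MTU
 */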
@@ -424,10 +788,21 @@ advance:
}
/* check if we got everything */
- if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
- dev_dbg(&intf->dev, "CDC descriptors missing\n");
+ if (!ctx->data) {
+ dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
goto error;
}
+ if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
+ if (!ctx->mbim_desc) {
+ dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
+ goto error;
+ }
+ } else {
+ if (!ctx->ether_desc || !ctx->func_desc) {
+ dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
+ goto error;
+ }
+ }
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
@@ -447,8 +822,8 @@ advance:
goto error2;
}
- /* initialize data interface */
- if (cdc_ncm_setup(dev))
+ /* initialize basic device settings */
+ if (cdc_ncm_init(dev))
goto error2;
/* configure data interface */
@@ -477,18 +852,14 @@ advance:
dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
}
- /* usbnet use these values for sizing tx/rx queues */
- dev->hard_mtu = ctx->tx_max;
- dev->rx_urb_size = ctx->rx_max;
+ /* finish setting up the device specific data */
+ cdc_ncm_setup(dev);
- /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
- * outside the sane range. Adding a pad byte here if necessary
- * simplifies the handling in cdc_ncm_fill_tx_frame, making
- * tx_max always represent the real skb max size.
- */
- if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
- ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
- ctx->tx_max++;
+ /* override ethtool_ops */
+ dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+
+ /* add our sysfs attrs */
+ dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
return 0;
@@ -541,10 +912,10 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
-/* Select the MBIM altsetting iff it is preferred and available,
- * returning the number of the corresponding data interface altsetting
+/* Return the number of the MBIM control interface altsetting iff it
+ * is preferred and available.
*/
-u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
{
struct usb_host_interface *alt;
@@ -563,15 +934,15 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
* the rules given in section 6 (USB Device Model) of this
* specification."
*/
- if (prefer_mbim && intf->num_altsetting == 2) {
+ if (intf->num_altsetting < 2)
+ return intf->cur_altsetting->desc.bAlternateSetting;
+
+ if (prefer_mbim) {
alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
- if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
- !usb_set_interface(dev->udev,
- intf->cur_altsetting->desc.bInterfaceNumber,
- CDC_NCM_COMM_ALTSETTING_MBIM))
- return CDC_NCM_DATA_ALTSETTING_MBIM;
+ if (alt && cdc_ncm_comm_intf_is_mbim(alt))
+ return CDC_NCM_COMM_ALTSETTING_MBIM;
}
- return CDC_NCM_DATA_ALTSETTING_NCM;
+ return CDC_NCM_COMM_ALTSETTING_NCM;
}
EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
@@ -580,12 +951,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
/* MBIM backwards compatible function? */
- cdc_ncm_select_altsetting(dev, intf);
- if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
- /* NCM data altsetting is always 1 */
- ret = cdc_ncm_bind_common(dev, intf, 1);
+ /* The NCM data altsetting is fixed */
+ ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
/*
* We should get an event when network connection is "connected" or
@@ -628,7 +998,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
/* verify that there is room for the NDP and the datagram (reserve) */
- if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+ if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
return NULL;
/* link to it */
@@ -638,7 +1008,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
ndp16->dwSignature = sign;
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
return ndp16;
@@ -683,6 +1053,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
+
+ /* recent payload counter for this skb_out */
+ ctx->tx_curr_frame_payload = 0;
}
for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
@@ -720,6 +1093,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
ctx->tx_rem_sign = sign;
skb = NULL;
ready2send = 1;
+ ctx->tx_reason_ntb_full++; /* count reason for transmitting */
}
break;
}
@@ -733,12 +1107,14 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
+ ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
skb = NULL;
/* send now if this NDP is full */
if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
ready2send = 1;
+ ctx->tx_reason_ndp_full++; /* count reason for transmitting */
break;
}
}
@@ -758,7 +1134,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
ctx->tx_curr_skb = skb_out;
goto exit_no_skb;
- } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
+ } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
@@ -768,11 +1144,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
goto exit_no_skb;
} else {
+ if (n == ctx->tx_max_datagrams)
+ ctx->tx_reason_max_datagram++; /* count reason for transmitting */
/* frame goes out */
/* variables will be reset at next call */
}
- /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT
+ /* If collected data size is less or equal ctx->min_tx_pkt
* bytes, we send buffers as it is. If we get more data, it
* would be more efficient for USB HS mobile device with DMA
* engine to receive a full size NTB, than canceling DMA
@@ -782,7 +1160,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* a ZLP after full sized NTBs.
*/
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
- skb_out->len > CDC_NCM_MIN_TX_PKT)
+ skb_out->len > ctx->min_tx_pkt)
memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
ctx->tx_max - skb_out->len);
else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
@@ -795,11 +1173,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+ /* keep private stats: framing overhead and number of NTBs */
+ ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
+ ctx->tx_ntbs++;
+
+ /* usbnet has already counted all the framing overhead.
+ * Adjust the stats so that the tx_bytes counter shows real
+ * payload data instead.
+ */
+ dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+
return skb_out;
exit_no_skb:
- /* Start timer, if there is a remaining skb */
- if (ctx->tx_curr_skb != NULL)
+ /* Start timer, if there is a remaining non-empty skb */
+ if (ctx->tx_curr_skb != NULL && n > 0)
cdc_ncm_tx_timeout_start(ctx);
return NULL;
}
@@ -810,7 +1199,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
/* start timer, if not already started */
if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
hrtimer_start(&ctx->tx_timer,
- ktime_set(0, CDC_NCM_TIMER_INTERVAL),
+ ktime_set(0, ctx->timer_interval),
HRTIMER_MODE_REL);
}
@@ -835,6 +1224,7 @@ static void cdc_ncm_txpath_bh(unsigned long param)
cdc_ncm_tx_timeout_start(ctx);
spin_unlock_bh(&ctx->mtx);
} else if (dev->net != NULL) {
+ ctx->tx_reason_timeout++; /* count reason for transmitting */
spin_unlock_bh(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
@@ -970,6 +1360,7 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
struct usb_cdc_ncm_dpe16 *dpe16;
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
+ u32 payload = 0;
ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
if (ndpoffset < 0)
@@ -1015,13 +1406,13 @@ next_ndp:
break;
} else {
- skb = skb_clone(skb_in, GFP_ATOMIC);
+ /* create a fresh copy to reduce truesize */
+ skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!skb)
goto error;
- skb->len = len;
- skb->data = ((u8 *)skb_in->data) + offset;
- skb_set_tail_pointer(skb, len);
+ memcpy(skb_put(skb, len), skb_in->data + offset, len);
usbnet_skb_return(dev, skb);
+ payload += len; /* count payload bytes in this NTB */
}
}
err_ndp:
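The skb_clone() removal above is a memory-accounting fix: a clone shares the parent's data buffer, so every small datagram inherits the truesize of the entire NTB (potentially tens of kilobytes), which inflates socket receive-buffer charges. Copying into a right-sized skb charges receivers only for real payload. Sketched in isolation (dgram_copy is an invented name):

static struct sk_buff *dgram_copy(struct net_device *net,
                                  const struct sk_buff *ntb,
                                  unsigned int offset, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb_ip_align(net, len);

        if (skb)
                memcpy(skb_put(skb, len), ntb->data + offset, len);
        return skb;     /* truesize now scales with len, not the NTB */
}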
@@ -1030,6 +1421,10 @@ err_ndp:
if (ndpoffset && loopcount--)
goto next_ndp;
+ /* update stats */
+ ctx->rx_overhead += skb_in->len - payload;
+ ctx->rx_ntbs++;
+
return 1;
error:
return 0;
@@ -1049,14 +1444,14 @@ cdc_ncm_speed_change(struct usbnet *dev,
*/
if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
netif_info(dev, link, dev->net,
- "%u mbit/s downlink %u mbit/s uplink\n",
- (unsigned int)(rx_speed / 1000000U),
- (unsigned int)(tx_speed / 1000000U));
+ "%u mbit/s downlink %u mbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
} else {
netif_info(dev, link, dev->net,
- "%u kbit/s downlink %u kbit/s uplink\n",
- (unsigned int)(rx_speed / 1000U),
- (unsigned int)(tx_speed / 1000U));
+ "%u kbit/s downlink %u kbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
}
}
@@ -1086,11 +1481,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- ctx->connected = le16_to_cpu(event->wValue);
netif_info(dev, link, dev->net,
"network connection: %sconnected\n",
- ctx->connected ? "" : "dis");
- usbnet_link_change(dev, ctx->connected, 0);
+ !!event->wValue ? "" : "dis");
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1110,23 +1504,11 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
}
}
-static int cdc_ncm_check_connect(struct usbnet *dev)
-{
- struct cdc_ncm_ctx *ctx;
-
- ctx = (struct cdc_ncm_ctx *)dev->data[0];
- if (ctx == NULL)
- return 1; /* disconnected */
-
- return !ctx->connected;
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
- .check_connect = cdc_ncm_check_connect,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
@@ -1140,7 +1522,6 @@ static const struct driver_info wwan_info = {
| FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
- .check_connect = cdc_ncm_check_connect,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
@@ -1154,7 +1535,6 @@ static const struct driver_info wwan_noarp_info = {
| FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
- .check_connect = cdc_ncm_check_connect,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 660bd5ea9fc0..a3a05869309d 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2425,7 +2425,7 @@ static void hso_net_init(struct net_device *net)
net->type = ARPHRD_NONE;
net->mtu = DEFAULT_MTU - 14;
net->tx_queue_len = 10;
- SET_ETHTOOL_OPS(net, &ops);
+ net->ethtool_ops = &ops;
/* and initialize the semaphore */
spin_lock_init(&hso_net->net_lock);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 312178d7b698..5d95a13dbe2a 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
ctx = drvstate->ctx;
if (usbnet_dev->status)
- /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
- * decimal (0x100)"
+ /* The wMaxCommand buffer must be big enough to hold
+ * any message from the modem. Experience has shown
+ * that some replies are more than 256 bytes long
*/
subdriver = usb_cdc_wdm_register(ctx->control,
&usbnet_dev->status->desc,
- 256, /* wMaxCommand */
+ 1024, /* wMaxCommand */
huawei_cdc_ncm_wdm_manage_power);
if (IS_ERR(subdriver)) {
ret = PTR_ERR(subdriver);
@@ -172,24 +173,11 @@ err:
return ret;
}
-static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
-{
- struct cdc_ncm_ctx *ctx;
-
- ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
-
- if (ctx == NULL)
- return 1; /* disconnected */
-
- return !ctx->connected;
-}
-
static const struct driver_info huawei_cdc_ncm_info = {
.description = "Huawei CDC NCM device",
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
.bind = huawei_cdc_ncm_bind,
.unbind = huawei_cdc_ncm_unbind,
- .check_connect = huawei_cdc_ncm_check_connect,
.manage_power = huawei_cdc_ncm_manage_power,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 973275fef250..76465b117b72 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -534,7 +534,7 @@ static int ipheth_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev);
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
retval = register_netdev(netdev);
if (retval) {
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index a359d3bb7c5b..dcb6d33141e0 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1171,7 +1171,7 @@ err_fw:
netdev->netdev_ops = &kaweth_netdev_ops;
netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
/* kaweth is zeroed as part of alloc_netdev */
INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 03e8a15d7deb..f84080215915 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1159,7 +1159,7 @@ static int pegasus_probe(struct usb_interface *intf,
net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
net->netdev_ops = &pegasus_netdev_ops;
- SET_ETHTOOL_OPS(net, &ops);
+ net->ethtool_ops = &ops;
pegasus->mii.dev = net;
pegasus->mii.mdio_read = mdio_read;
pegasus->mii.mdio_write = mdio_write;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index dc4bf06948c7..cf62d7e8329f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -763,7 +763,12 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
- {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3fbfb0869030..25431965a625 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -630,12 +630,10 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
int ret;
void *tmp;
- tmp = kmalloc(size, GFP_KERNEL);
+ tmp = kmemdup(data, size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
- memcpy(tmp, data, size);
-
ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, tmp, size, 500);
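kmemdup() is the idiomatic replacement for an allocate-then-copy pair; its implementation is essentially the following (a sketch from memory of mm/util.c, which actually uses kmalloc_track_caller):

void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p = kmalloc(len, gfp);

        if (p)
                memcpy(p, src, len);
        return p;
}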
@@ -3452,7 +3450,7 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_TSO | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
tp->mii.dev = netdev;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index da2c4583bd2d..6e87e5710048 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
dev->netdev = netdev;
netdev->netdev_ops = &rtl8150_netdev_ops;
netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
dev->intr_interval = 100; /* 100ms */
if (!alloc_all_urbs(dev)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a852b5f215f..7d9f84a91f37 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1646,7 +1646,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
- SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
+ dev->ethtool_ops = &virtnet_ethtool_ops;
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ if (vi->any_header_sg) {
+ if (vi->mergeable_rx_bufs)
+ dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ dev->needed_headroom = sizeof(struct virtio_net_hdr);
+ }
+
/* Use single tx/rx queue pair as default */
vi->curr_queue_pairs = 1;
vi->max_queue_pairs = max_queue_pairs;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 97394345e5dd..b76f7dcde0db 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2589,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev)
for (i = 0; i < adapter->num_tx_queues; i++)
spin_lock_init(&adapter->tx_queue[i].tx_lock);
- err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
- VMXNET3_DEF_RX_RING_SIZE,
+ err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+ adapter->rx_ring_size,
VMXNET3_DEF_RX_RING_SIZE);
if (err)
goto queue_err;
@@ -2968,6 +2968,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+ adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+
spin_lock_init(&adapter->cmd_lock);
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
sizeof(struct vmxnet3_adapter),
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 600ab56c0008..b725fd9e7803 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -431,8 +431,8 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
@@ -449,8 +449,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
- param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
- param->tx_pending = adapter->tx_queue[0].tx_ring.size;
+ param->rx_pending = adapter->rx_ring_size;
+ param->tx_pending = adapter->tx_ring_size;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
@@ -529,9 +529,11 @@ vmxnet3_set_ringparam(struct net_device *netdev,
* size */
netdev_err(netdev, "failed to apply new sizes, "
"try the default ones\n");
+ new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
err = vmxnet3_create_queues(adapter,
- VMXNET3_DEF_TX_RING_SIZE,
- VMXNET3_DEF_RX_RING_SIZE,
+ new_tx_ring_size,
+ new_rx_ring_size,
VMXNET3_DEF_RX_RING_SIZE);
if (err) {
netdev_err(netdev, "failed to create queues "
@@ -545,6 +547,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
netdev_err(netdev, "failed to re-activate, error %d."
" Closing it\n", err);
}
+ adapter->tx_ring_size = new_tx_ring_size;
+ adapter->rx_ring_size = new_rx_ring_size;
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -579,7 +583,7 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
}
static int
-vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
+vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
@@ -592,7 +596,7 @@ vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
}
static int
-vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
+vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key)
{
unsigned int i;
unsigned long flags;
@@ -628,12 +632,12 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_rxnfc = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
- .get_rxfh_indir = vmxnet3_get_rss_indir,
- .set_rxfh_indir = vmxnet3_set_rss_indir,
+ .get_rxfh = vmxnet3_get_rss,
+ .set_rxfh = vmxnet3_set_rss,
#endif
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
+ netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}
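The rename tracks an ethtool API change in this release: the indirection-table-only hooks were folded into combined RSS accessors that also carry the hash key, which vmxnet3 simply ignores here. The new signatures, as I recall them from include/linux/ethtool.h at the time:

int (*get_rxfh)(struct net_device *dev, u32 *indir, u8 *key);
int (*set_rxfh)(struct net_device *dev, const u32 *indir, const u8 *key);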
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 190569d02450..29ee77f2c97f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -349,6 +349,11 @@ struct vmxnet3_adapter {
u32 link_speed; /* in mbps */
u64 tx_timeout_count;
+
+ /* Ring sizes */
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+
struct work_struct work;
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4dbb2ed85b97..ade33ef82823 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -127,6 +127,7 @@ struct vxlan_dev {
struct list_head next; /* vxlan's per namespace list */
struct vxlan_sock *vn_sock; /* listening socket */
struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
struct vxlan_rdst default_dst; /* default destination */
union vxlan_addr saddr; /* source address */
__be16 dst_port;
@@ -134,7 +135,7 @@ struct vxlan_dev {
__u16 port_max;
__u8 tos; /* TOS override */
__u8 ttl;
- u32 flags; /* VXLAN_F_* below */
+ u32 flags; /* VXLAN_F_* in vxlan.h */
struct work_struct sock_work;
struct work_struct igmp_join;
@@ -149,13 +150,6 @@ struct vxlan_dev {
struct hlist_head fdb_head[FDB_HASH_SIZE];
};
-#define VXLAN_F_LEARN 0x01
-#define VXLAN_F_PROXY 0x02
-#define VXLAN_F_RSC 0x04
-#define VXLAN_F_L2MISS 0x08
-#define VXLAN_F_L3MISS 0x10
-#define VXLAN_F_IPV6 0x20 /* internal flag */
-
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
@@ -571,6 +565,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
goto out;
}
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+ skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
off_eth = skb_gro_offset(skb);
hlen = off_eth + sizeof(*eh);
@@ -605,6 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
}
skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
+ skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
pp = ptype->callbacks.gro_receive(head, skb);
out_unlock:
@@ -1160,15 +1156,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!vs)
goto drop;
- /* If the NIC driver gave us an encapsulated packet
- * with the encapsulation mark, the device checksummed it
- * for us. Otherwise force the upper layers to verify it.
- */
- if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
- !skb->encapsulation)
- skb->ip_summed = CHECKSUM_NONE;
-
- skb->encapsulation = 0;
+ skb_pop_rcv_encapsulation(skb);
vs->rcv(vs, skb, vxh->vx_vni);
return 0;
@@ -1203,7 +1191,9 @@ static void vxlan_rcv(struct vxlan_sock *vs,
remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
@@ -1599,18 +1589,11 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(vxlan_src_port);
-static int handle_offloads(struct sk_buff *skb)
+static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
+ bool udp_csum)
{
- if (skb_is_gso(skb)) {
- int err = skb_unclone(skb, GFP_ATOMIC);
- if (unlikely(err))
- return err;
-
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- } else if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
-
- return 0;
+ int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ return iptunnel_handle_offloads(skb, udp_csum, type);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1618,7 +1601,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
struct dst_entry *dst, struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port, __be32 vni)
+ __be16 src_port, __be16 dst_port, __be32 vni,
+ bool xnet)
{
struct ipv6hdr *ip6h;
struct vxlanhdr *vxh;
@@ -1626,12 +1610,11 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
int min_headroom;
int err;
- if (!skb->encapsulation) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk));
+ if (IS_ERR(skb))
+ return -EINVAL;
- skb_scrub_packet(skb, false);
+ skb_scrub_packet(skb, xnet);
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1663,27 +1646,14 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
uh->source = src_port;
uh->len = htons(skb->len);
- uh->check = 0;
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
skb_dst_set(skb, dst);
- if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
- __wsum csum = skb_checksum(skb, 0, skb->len, 0);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
- IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_ipv6_magic(saddr, daddr,
- skb->len, IPPROTO_UDP, 0);
- }
+ udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb,
+ saddr, daddr, skb->len);
__skb_push(skb, sizeof(*ip6h));
skb_reset_network_header(skb);
@@ -1699,10 +1669,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- err = handle_offloads(skb);
- if (err)
- return err;
-
ip6tunnel_xmit(skb, dev);
return 0;
}
@@ -1711,17 +1677,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
int vxlan_xmit_skb(struct vxlan_sock *vs,
struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni)
+ __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
{
struct vxlanhdr *vxh;
struct udphdr *uh;
int min_headroom;
int err;
- if (!skb->encapsulation) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx);
+ if (IS_ERR(skb))
+ return -EINVAL;
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1753,14 +1718,12 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
uh->source = src_port;
uh->len = htons(skb->len);
- uh->check = 0;
- err = handle_offloads(skb);
- if (err)
- return err;
+ udp_set_csum(vs->sock->sk->sk_no_check_tx, skb,
+ src, dst, skb->len);
return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
- tos, ttl, df, false);
+ tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
@@ -1853,7 +1816,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
fl4.daddr = dst->sin.sin_addr.s_addr;
fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
- rt = ip_route_output_key(dev_net(dev), &fl4);
+ rt = ip_route_output_key(vxlan->net, &fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
&dst->sin.sin_addr.s_addr);
@@ -1874,7 +1837,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1887,7 +1850,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
fl4.saddr, dst->sin.sin_addr.s_addr,
tos, ttl, df, src_port, dst_port,
- htonl(vni << 8));
+ htonl(vni << 8),
+ !net_eq(vxlan->net, dev_net(vxlan->dev)));
if (err < 0)
goto rt_tx_error;
@@ -1927,7 +1891,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1938,7 +1902,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
dev, &fl6.saddr, &fl6.daddr, 0, ttl,
- src_port, dst_port, htonl(vni << 8));
+ src_port, dst_port, htonl(vni << 8),
+ !net_eq(vxlan->net, dev_net(vxlan->dev)));
#endif
}
@@ -2082,7 +2047,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2090,7 +2055,7 @@ static int vxlan_init(struct net_device *dev)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
+ vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
@@ -2172,8 +2137,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2202,7 +2167,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct net_device *lowerdev;
int max_mtu;
- lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
if (lowerdev == NULL)
return eth_change_mtu(dev, new_mtu);
@@ -2275,9 +2240,9 @@ static void vxlan_setup(struct net_device *dev)
eth_hw_addr_random(dev);
ether_setup(dev);
if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
- dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
+ dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
else
- dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+ dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
@@ -2285,7 +2250,6 @@ static void vxlan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -2401,7 +2365,7 @@ static void vxlan_del_work(struct work_struct *work)
* could be used for both IPv4 and IPv6 communications, but
* users may set bindv6only=1.
*/
-static struct socket *create_v6_sock(struct net *net, __be16 port)
+static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
{
struct sock *sk;
struct socket *sock;
@@ -2438,18 +2402,25 @@ static struct socket *create_v6_sock(struct net *net, __be16 port)
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
+
+ if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
+ udp_set_no_check6_tx(sk, true);
+
+ if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
+ udp_set_no_check6_rx(sk, true);
+
return sock;
}
#else
-static struct socket *create_v6_sock(struct net *net, __be16 port)
+static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
{
return ERR_PTR(-EPFNOSUPPORT);
}
#endif
-static struct socket *create_v4_sock(struct net *net, __be16 port)
+static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
{
struct sock *sk;
struct socket *sock;
@@ -2482,18 +2453,24 @@ static struct socket *create_v4_sock(struct net *net, __be16 port)
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
+
+ if (!(flags & VXLAN_F_UDP_CSUM))
+ sock->sk->sk_no_check_tx = 1;
+
return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data, bool ipv6)
+ vxlan_rcv_t *rcv, void *data,
+ u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
struct socket *sock;
struct sock *sk;
unsigned int h;
+ bool ipv6 = !!(flags & VXLAN_F_IPV6);
vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
@@ -2505,9 +2482,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
INIT_WORK(&vs->del_work, vxlan_del_work);
if (ipv6)
- sock = create_v6_sock(net, port);
+ sock = create_v6_sock(net, port, flags);
else
- sock = create_v4_sock(net, port);
+ sock = create_v4_sock(net, port, flags);
if (IS_ERR(sock)) {
kfree(vs);
return ERR_CAST(sock);
@@ -2545,12 +2522,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
vxlan_rcv_t *rcv, void *data,
- bool no_share, bool ipv6)
+ bool no_share, u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
- vs = vxlan_socket_create(net, port, rcv, data, ipv6);
+ vs = vxlan_socket_create(net, port, rcv, data, flags);
if (!IS_ERR(vs))
return vs;
@@ -2578,12 +2555,12 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
static void vxlan_sock_work(struct work_struct *work)
{
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
- struct net *net = dev_net(vxlan->dev);
+ struct net *net = vxlan->net;
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
__be16 port = vxlan->dst_port;
struct vxlan_sock *nvs;
- nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6);
+ nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
spin_lock(&vn->sock_lock);
if (!IS_ERR(nvs))
vxlan_vs_add_dev(nvs, vxlan);
@@ -2605,6 +2582,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
+ vxlan->net = dev_net(dev);
+
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
@@ -2660,8 +2639,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- /* update header length based on lower device */
- dev->hard_header_len = lowerdev->hard_header_len +
+ dev->needed_headroom = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
} else if (use_ipv6)
vxlan->flags |= VXLAN_F_IPV6;
@@ -2705,12 +2683,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_PORT])
vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
+ vxlan->flags |= VXLAN_F_UDP_CSUM;
+
+ if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
+ nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
+ vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+
+ if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
+ nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
+ vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+
if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
pr_info("duplicate VNI %u\n", vni);
return -EEXIST;
}
- SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
+ dev->ethtool_ops = &vxlan_ethtool_ops;
/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
@@ -2739,8 +2728,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
@@ -2768,7 +2757,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
- nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */
+ nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
0;
}
@@ -2828,7 +2820,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->flags & VXLAN_F_L3MISS)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
- nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
+ nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
+ !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
@@ -2905,8 +2903,33 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan, *next;
+ struct net_device *dev, *aux;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &vxlan_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+ /* If vxlan->dev is in the same netns, it has already been added
+ * to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(vxlan->dev), net))
+ unregister_netdevice_queue(vxlan->dev, &list);
+ }
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
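A note on the recurring !net_eq(vxlan->net, dev_net(vxlan->dev)) expression in the hunks above: together with dropping NETIF_F_NETNS_LOCAL, it lets a VXLAN device live in a different namespace than the one holding its UDP socket, and the resulting xnet flag tells the tunnel transmit path that the packet crosses namespaces. A minimal sketch of what consumers of that flag typically do (simplified from the iptunnel_xmit() convention, not the exact sequence):

    /* Sketch: when xnet is true the skb is leaving the namespace it was
     * created in, so namespace-local state must be dropped.
     */
    static void tunnel_scrub_sketch(struct sk_buff *skb, bool xnet)
    {
        skb_scrub_packet(skb, xnet);    /* clears mark, skb_iif, etc. */
        skb_clear_hash(skb);
        skb->pkt_type = PACKET_HOST;
    }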
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index bcfff0d62de4..93ace042d0aa 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -26,6 +26,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/if.h>
#include <linux/hdlc.h>
#include <asm/io.h>
@@ -678,7 +679,6 @@ static inline void
fst_cpureset(struct fst_card_info *card)
{
unsigned char interrupt_line_register;
- unsigned long j = jiffies + 1;
unsigned int regval;
if (card->family == FST_FAMILY_TXU) {
@@ -696,16 +696,12 @@ fst_cpureset(struct fst_card_info *card)
/*
* We are delaying here to allow the 9054 to reset itself
*/
- j = jiffies + 1;
- while (jiffies < j)
- /* Do nothing */ ;
+ usleep_range(10, 20);
outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
/*
* We are delaying here to allow the 9054 to reload its eeprom
*/
- j = jiffies + 1;
- while (jiffies < j)
- /* Do nothing */ ;
+ usleep_range(10, 20);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
if (pci_write_config_byte
@@ -886,20 +882,18 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
* Receive a frame through the DMA
*/
static inline void
-fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
- dma_addr_t mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
/*
* This routine will set up the DMA and start it
*/
- dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
- (unsigned long) skb, (unsigned long) mem, len);
+ dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
if (card->dmarx_in_progress) {
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
}
- outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
+ outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -915,20 +909,19 @@ fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
* Send a frame through the DMA
*/
static inline void
-fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
- unsigned char *mem, int len)
+fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
/*
* This routine will set up the DMA and start it.
*/
- dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
+ dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
if (card->dmatx_in_progress) {
dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
}
- outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */
- outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */
+ outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
+ outl(mem, card->pci_conf + DMALADR1); /* to here */
outl(len, card->pci_conf + DMASIZ1); /* for this length */
outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
@@ -1405,9 +1398,7 @@ do_bottom_half_tx(struct fst_card_info *card)
card->dma_len_tx = skb->len;
card->dma_txpos = port->txpos;
fst_tx_dma(card,
- (char *) card->
- tx_dma_handle_card,
- (char *)
+ card->tx_dma_handle_card,
BUF_OFFSET(txBuffer[pi]
[port->txpos][0]),
skb->len);
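The fst_cpureset() hunk earlier in this file swaps an open-coded jiffies busy-wait for usleep_range(). The pattern, for reference (generic sketch, not farsync-specific):

    /* Before (sketch): spins the CPU for up to one tick (1-10 ms
     * depending on CONFIG_HZ) and compares raw jiffies, so it also
     * misbehaves at wraparound, where time_before() would be needed:
     *
     *     unsigned long j = jiffies + 1;
     *     while (jiffies < j)
     *         ;
     *
     * After: sleeps for 10-20 us and lets the timer subsystem coalesce
     * nearby wakeups; only legal in process context.
     */
    usleep_range(10, 20);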
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index de3bbf43fc5a..cdd45fb8a1f6 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1322,10 +1322,6 @@ NOTE: This is rather a useless action right now, as the
static int sdla_change_mtu(struct net_device *dev, int new_mtu)
{
- struct frad_local *flp;
-
- flp = netdev_priv(dev);
-
if (netif_running(dev))
return -EBUSY;
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4a01e5c7fe09..4c417903e9be 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -1061,7 +1061,7 @@ int i2400m_firmware_check(struct i2400m *i2400m)
goto error_bad_major;
}
result = 0;
- if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR)
+ if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR)
dev_warn(dev, "untested minor fw version %u.%u.%u\n",
major, minor, branch);
/* Yes, we ignore the branch -- we don't have to track it */
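The i2400m_firmware_check() fix above deserves a worked example. Assuming the bounds are I2400M_HDIv_MINOR = 5 and I2400M_HDIv_MINOR_2 = 6 (values assumed here for illustration), the old conjunction minor < 6 && minor > 5 is unsatisfiable over integers, so the "untested minor fw version" warning could never fire. The corrected disjunction warns exactly when minor falls outside [5, 6]:

    #include <stdio.h>

    #define MINOR_LO 5   /* stands in for I2400M_HDIv_MINOR (assumed) */
    #define MINOR_HI 6   /* stands in for I2400M_HDIv_MINOR_2 (assumed) */

    int main(void)
    {
        for (unsigned int minor = 3; minor <= 8; minor++) {
            int old   = minor < MINOR_HI && minor > MINOR_LO; /* always 0 */
            int fixed = minor > MINOR_HI || minor < MINOR_LO; /* outside [5,6] */
            printf("minor=%u old=%d fixed=%d\n", minor, old, fixed);
        }
        return 0;
    }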
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c34d2fccfac..9c78090e72f8 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
*/
int i2400m_pre_reset(struct i2400m *i2400m)
{
- int result;
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
d_printf(1, dev, "pre-reset shut down\n");
- result = 0;
mutex_lock(&i2400m->init_mutex);
if (i2400m->updown) {
netif_tx_disable(i2400m->wimax_dev.net_dev);
__i2400m_dev_stop(i2400m);
- result = 0;
/* don't set updown to zero -- this way
* post_reset can restore properly */
}
mutex_unlock(&i2400m->init_mutex);
if (i2400m->bus_release)
i2400m->bus_release(i2400m);
- d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
- return result;
+ d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+ return 0;
}
EXPORT_SYMBOL_GPL(i2400m_pre_reset);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b3bfa717d5..d48776e4f343 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -365,15 +365,15 @@ static inline unsigned long at76_get_timeout(struct dfu_status *s)
static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
int manifest_sync_timeout)
{
- u8 *block;
- struct dfu_status dfu_stat_buf;
int ret = 0;
int need_dfu_state = 1;
int is_done = 0;
- u8 dfu_state = 0;
u32 dfu_timeout = 0;
int bsize = 0;
int blockno = 0;
+ struct dfu_status *dfu_stat_buf = NULL;
+ u8 *dfu_state = NULL;
+ u8 *block = NULL;
at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size,
manifest_sync_timeout);
@@ -383,13 +383,28 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
return -EINVAL;
}
+ dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
+ if (!dfu_stat_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
- if (!block)
- return -ENOMEM;
+ if (!block) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ dfu_state = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!dfu_state) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *dfu_state = 0;
do {
if (need_dfu_state) {
- ret = at76_dfu_get_state(udev, &dfu_state);
+ ret = at76_dfu_get_state(udev, dfu_state);
if (ret < 0) {
dev_err(&udev->dev,
"cannot get DFU state: %d\n", ret);
@@ -398,13 +413,13 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
need_dfu_state = 0;
}
- switch (dfu_state) {
+ switch (*dfu_state) {
case STATE_DFU_DOWNLOAD_SYNC:
at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC");
- ret = at76_dfu_get_status(udev, &dfu_stat_buf);
+ ret = at76_dfu_get_status(udev, dfu_stat_buf);
if (ret >= 0) {
- dfu_state = dfu_stat_buf.state;
- dfu_timeout = at76_get_timeout(&dfu_stat_buf);
+ *dfu_state = dfu_stat_buf->state;
+ dfu_timeout = at76_get_timeout(dfu_stat_buf);
need_dfu_state = 0;
} else
dev_err(&udev->dev,
@@ -447,12 +462,12 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
case STATE_DFU_MANIFEST_SYNC:
at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC");
- ret = at76_dfu_get_status(udev, &dfu_stat_buf);
+ ret = at76_dfu_get_status(udev, dfu_stat_buf);
if (ret < 0)
break;
- dfu_state = dfu_stat_buf.state;
- dfu_timeout = at76_get_timeout(&dfu_stat_buf);
+ *dfu_state = dfu_stat_buf->state;
+ dfu_timeout = at76_get_timeout(dfu_stat_buf);
need_dfu_state = 0;
/* override the timeout from the status response,
@@ -484,14 +499,17 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
break;
default:
- at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", dfu_state);
+ at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", *dfu_state);
ret = -EINVAL;
break;
}
} while (!is_done && (ret >= 0));
exit:
+ kfree(dfu_state);
kfree(block);
+ kfree(dfu_stat_buf);
+
if (ret >= 0)
ret = 0;
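The wider at76_usbdfu_download() conversion above moves dfu_stat_buf, block and dfu_state from the stack to kmalloc(). The reason is that these buffers are handed to USB transfer functions, which may DMA-map them, and on-stack memory is not DMA-safe. A minimal sketch of the rule (DFU_GETSTATE is the driver's existing request code; the wrapper itself is illustrative):

    /* Sketch: buffers passed to USB transfer functions must be heap
     * memory, never stack memory, because the USB core may DMA-map
     * them.
     */
    static int dfu_get_state_sketch(struct usb_device *udev, u8 *state_out)
    {
        u8 *state;
        int ret;

        state = kmalloc(1, GFP_KERNEL);        /* DMA-safe */
        if (!state)
            return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATE,
                              USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
                              0, 0, state, 1, USB_CTRL_GET_TIMEOUT);
        if (ret == 1)
            *state_out = *state;

        kfree(state);
        return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
    }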
@@ -1277,6 +1295,7 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
dev_err(&udev->dev,
"loading %dth firmware block failed: %d\n",
blockno, ret);
+ ret = -EIO;
goto exit;
}
buf += bsize;
@@ -1410,6 +1429,8 @@ static int at76_startup_device(struct at76_priv *priv)
/* remove BSSID from previous run */
memset(priv->bssid, 0, ETH_ALEN);
+ priv->scanning = false;
+
if (at76_set_radio(priv, 1) == 1)
at76_wait_completion(priv, CMD_RADIO_ON);
@@ -1483,6 +1504,52 @@ static void at76_work_submit_rx(struct work_struct *work)
mutex_unlock(&priv->mtx);
}
+/* This is a workaround to make scanning work: mac80211 currently does
+ * not process frames that carry no frequency information.
+ * However, during a scan the hardware performs the channel sweep by
+ * itself, so we cannot know where the radio is actually tuned.
+ * This function does its best to guess that information.
+ * During a scan, if the current frame is a beacon or a probe response,
+ * the channel information is extracted from it.
+ * When not scanning, for other frame types, or whenever parsing a
+ * beacon or probe response fails, it returns priv->channel, which
+ * should be correct at least while we are not scanning.
+ */
+static inline int at76_guess_freq(struct at76_priv *priv)
+{
+ size_t el_off;
+ const u8 *el;
+ int channel = priv->channel;
+ int len = priv->rx_skb->len;
+ struct ieee80211_hdr *hdr = (void *)priv->rx_skb->data;
+
+ if (!priv->scanning)
+ goto exit;
+
+ if (len < 24)
+ goto exit;
+
+ if (ieee80211_is_probe_resp(hdr->frame_control)) {
+ el_off = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ el = ((struct ieee80211_mgmt *)hdr)->u.probe_resp.variable;
+ } else if (ieee80211_is_beacon(hdr->frame_control)) {
+ el_off = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ el = ((struct ieee80211_mgmt *)hdr)->u.beacon.variable;
+ } else {
+ goto exit;
+ }
+ len -= el_off;
+
+ el = cfg80211_find_ie(WLAN_EID_DS_PARAMS, el, len);
+ if (el && el[1] > 0)
+ channel = el[2];
+
+exit:
+ return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+}
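For reference, the fallback at the end of at76_guess_freq() relies on the fixed 2.4 GHz channel-to-frequency mapping implemented by ieee80211_channel_to_frequency(); a sketch of that rule:

    /* 2.4 GHz band rule applied by ieee80211_channel_to_frequency()
     * (sketch): e.g. channel 1 -> 2412 MHz, channel 6 -> 2437 MHz.
     */
    static int chan_to_freq_2ghz_sketch(int chan)
    {
        if (chan == 14)
            return 2484;
        if (chan >= 1 && chan <= 13)
            return 2407 + chan * 5;
        return -1;   /* not a 2.4 GHz channel */
    }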
+
static void at76_rx_tasklet(unsigned long param)
{
struct urb *urb = (struct urb *)param;
@@ -1523,6 +1590,8 @@ static void at76_rx_tasklet(unsigned long param)
rx_status.signal = buf->rssi;
rx_status.flag |= RX_FLAG_DECRYPTED;
rx_status.flag |= RX_FLAG_IV_STRIPPED;
+ rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.freq = at76_guess_freq(priv);
at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
priv->rx_skb->len, priv->rx_skb->data_len);
@@ -1875,6 +1944,8 @@ static void at76_dwork_hw_scan(struct work_struct *work)
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
+ priv->scanning = false;
+
mutex_unlock(&priv->mtx);
ieee80211_scan_completed(priv->hw, false);
@@ -1929,6 +2000,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
goto exit;
}
+ priv->scanning = true;
ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan,
SCAN_POLL_INTERVAL);
@@ -2020,6 +2092,44 @@ static void at76_configure_filter(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &priv->work_set_promisc);
}
+static int at76_set_wep(struct at76_priv *priv)
+{
+ int ret = 0;
+ struct mib_mac_wep *mib_data = &priv->mib_buf.data.wep_mib;
+
+ priv->mib_buf.type = MIB_MAC_WEP;
+ priv->mib_buf.size = sizeof(struct mib_mac_wep);
+ priv->mib_buf.index = 0;
+
+ memset(mib_data, 0, sizeof(*mib_data));
+
+ if (priv->wep_enabled) {
+ if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN)
+ mib_data->encryption_level = 2;
+ else
+ mib_data->encryption_level = 1;
+
+ /* always exclude unencrypted if WEP is active */
+ mib_data->exclude_unencrypted = 1;
+ } else {
+ mib_data->exclude_unencrypted = 0;
+ mib_data->encryption_level = 0;
+ }
+
+ mib_data->privacy_invoked = priv->wep_enabled;
+ mib_data->wep_default_key_id = priv->wep_key_id;
+ memcpy(mib_data->wep_default_keyvalue, priv->wep_keys,
+ sizeof(priv->wep_keys));
+
+ ret = at76_set_mib(priv, &priv->mib_buf);
+
+ if (ret < 0)
+ wiphy_err(priv->hw->wiphy,
+ "set_mib (wep) failed: %d\n", ret);
+
+ return ret;
+}
+
static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -2062,7 +2172,7 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
priv->wep_enabled = 1;
}
- at76_startup_device(priv);
+ at76_set_wep(priv);
mutex_unlock(&priv->mtx);
@@ -2330,16 +2440,22 @@ static int at76_probe(struct usb_interface *interface,
struct usb_device *udev;
int op_mode;
int need_ext_fw = 0;
- struct mib_fw_version fwv;
+ struct mib_fw_version *fwv = NULL;
int board_type = (int)id->driver_info;
udev = usb_get_dev(interface_to_usbdev(interface));
+ fwv = kmalloc(sizeof(*fwv), GFP_KERNEL);
+ if (!fwv) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
/* Load firmware into kernel memory */
fwe = at76_load_firmware(udev, board_type);
if (!fwe) {
ret = -ENOENT;
- goto error;
+ goto exit;
}
op_mode = at76_get_op_mode(udev);
@@ -2353,7 +2469,7 @@ static int at76_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"cannot handle a device in HW_CONFIG_MODE\n");
ret = -EBUSY;
- goto error;
+ goto exit;
}
if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
@@ -2366,10 +2482,10 @@ static int at76_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"error %d downloading internal firmware\n",
ret);
- goto error;
+ goto exit;
}
usb_put_dev(udev);
- return ret;
+ goto exit;
}
/* Internal firmware already inside the device. Get firmware
@@ -2382,8 +2498,8 @@ static int at76_probe(struct usb_interface *interface,
* query the device for the fw version */
if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100)
|| (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) {
- ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
- if (ret < 0 || (fwv.major | fwv.minor) == 0)
+ ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
+ if (ret < 0 || (fwv->major | fwv->minor) == 0)
need_ext_fw = 1;
} else
/* No way to check firmware version, reload to be sure */
@@ -2394,37 +2510,37 @@ static int at76_probe(struct usb_interface *interface,
"downloading external firmware\n");
ret = at76_load_external_fw(udev, fwe);
- if (ret)
- goto error;
+ if (ret < 0)
+ goto exit;
/* Re-check firmware version */
- ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
+ ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
if (ret < 0) {
dev_err(&interface->dev,
"error %d getting firmware version\n", ret);
- goto error;
+ goto exit;
}
}
priv = at76_alloc_new_device(udev);
if (!priv) {
ret = -ENOMEM;
- goto error;
+ goto exit;
}
usb_set_intfdata(interface, priv);
- memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version));
+ memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
priv->board_type = board_type;
ret = at76_init_new_device(priv, interface);
if (ret < 0)
at76_delete_device(priv);
- return ret;
-
-error:
- usb_put_dev(udev);
+exit:
+ kfree(fwv);
+ if (ret < 0)
+ usb_put_dev(udev);
return ret;
}
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index f14a65473fe8..55090a38ac95 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -219,18 +219,6 @@ struct at76_req_join {
u8 reserved;
} __packed;
-struct set_mib_buffer {
- u8 type;
- u8 size;
- u8 index;
- u8 reserved;
- union {
- u8 byte;
- __le16 word;
- u8 addr[ETH_ALEN];
- } data;
-} __packed;
-
struct mib_local {
u16 reserved0;
u8 beacon_enable;
@@ -334,6 +322,19 @@ struct mib_mdomain {
u8 channel_list[14]; /* 0 for invalid channels */
} __packed;
+struct set_mib_buffer {
+ u8 type;
+ u8 size;
+ u8 index;
+ u8 reserved;
+ union {
+ u8 byte;
+ __le16 word;
+ u8 addr[ETH_ALEN];
+ struct mib_mac_wep wep_mib;
+ } data;
+} __packed;
+
struct at76_fw_header {
__le32 crc; /* CRC32 of the whole image */
__le32 board_type; /* firmware compatibility code */
@@ -417,6 +418,7 @@ struct at76_priv {
int scan_max_time; /* scan max channel time */
int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
int scan_need_any; /* if set, need to scan for any ESSID */
+ bool scanning; /* if set, the scan is running */
u16 assoc_id; /* current association ID, if associated */
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 507d9a9ee69a..f92050617ae6 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ar5523 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index a1f099628850..17d221abd58c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
return 0;
}
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
struct bmi_cmd cmd;
union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
- address, *param);
+ address, param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
- cmd.execute.param = __cpu_to_le32(*param);
+ cmd.execute.param = __cpu_to_le32(param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
if (resplen < sizeof(resp.execute)) {
ath10k_warn("invalid execute response length (%d)\n",
resplen);
- return ret;
+ return -EIO;
}
- *param = __le32_to_cpu(resp.execute.result);
+ *result = __le32_to_cpu(resp.execute.result);
+
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 8d81ce1cec21..111ab701465c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -201,7 +201,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
\
addr = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
- *val = __le32_to_cpu(tmp); \
+ if (!ret) \
+ *val = __le32_to_cpu(tmp); \
ret; \
})
@@ -217,7 +218,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
ret; \
})
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c82350..d185dc0cd12b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -329,6 +329,33 @@ exit:
return ret;
}
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *src_ring = pipe->src_ring;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ /*
+ * This function must be called only if there is an incomplete
+ * scatter-gather transfer (before index register is updated)
+ * that needs to be cleaned up.
+ */
+ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+ return;
+
+ if (WARN_ON_ONCE(src_ring->write_index ==
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+ return;
+
+ src_ring->write_index--;
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
@@ -840,35 +867,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_ring *src_ring;
- unsigned int nentries = attr->src_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
- dma_addr_t base_addr;
- char *ptr;
-
- nentries = roundup_pow_of_two(nentries);
-
- if (ce_state->src_ring) {
- WARN_ON(ce_state->src_ring->nentries != nentries);
- return 0;
- }
-
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
- ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
- src_ring = ce_state->src_ring;
+ nentries = roundup_pow_of_two(attr->src_nentries);
- ptr += sizeof(struct ath10k_ce_ring);
- src_ring->nentries = nentries;
- src_ring->nentries_mask = nentries - 1;
+ memset(src_ring->per_transfer_context, 0,
+ nentries * sizeof(*src_ring->per_transfer_context));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +887,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- src_ring->per_transfer_context = (void **)ptr;
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ memset(dest_ring->per_transfer_context, 0,
+ nentries * sizeof(*dest_ring->per_transfer_context));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
src_ring->base_addr_owner_space_unaligned =
- pci_alloc_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +987,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) {
- pci_free_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- src_ring->base_addr_owner_space,
- src_ring->base_addr_ce_space);
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
- src_ring->base_addr_ce_space);
- ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
- ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce src ring id %d entries %d base_addr %p\n",
- ce_id, nentries, src_ring->base_addr_owner_space);
-
- return 0;
+ return src_ring;
}
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
- unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
- const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring;
- unsigned int nentries = attr->dest_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 nentries;
dma_addr_t base_addr;
- char *ptr;
- nentries = roundup_pow_of_two(nentries);
+ nentries = roundup_pow_of_two(attr->dest_nentries);
- if (ce_state->dest_ring) {
- WARN_ON(ce_state->dest_ring->nentries != nentries);
- return 0;
- }
-
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
- ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
- dest_ring = ce_state->dest_ring;
-
- ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
- dest_ring->sw_index &= dest_ring->nentries_mask;
- dest_ring->write_index =
- ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
- dest_ring->write_index &= dest_ring->nentries_mask;
-
- dest_ring->per_transfer_context = (void **)ptr;
-
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- pci_alloc_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->dest_ring);
- ce_state->dest_ring = NULL;
- return -ENOMEM;
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1053,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
- dest_ring->base_addr_ce_space);
- ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
- ce_id, nentries, dest_ring->base_addr_owner_space);
-
- return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
- spin_lock_bh(&ar_pci->ce_lock);
-
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
-
- spin_unlock_bh(&ar_pci->ce_lock);
-
- return ce_state;
+ return dest_ring;
}
/*
@@ -1054,11 +1063,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
@@ -1074,64 +1083,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ret = ath10k_pci_wake(ar);
if (ret)
- return NULL;
+ return ret;
- ce_state = ath10k_ce_init_state(ar, ce_id, attr);
- if (!ce_state) {
- ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
- goto out;
- }
+ spin_lock_bh(&ar_pci->ce_lock);
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+ spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
- ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
if (attr->dest_nentries) {
- ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
out:
ath10k_pci_sleep(ar);
- return ce_state;
+ return ret;
}
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+ ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
- struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
- pci_free_consistent(ar_pci->pdev,
- (ce_state->src_ring->nentries *
- sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
}
if (ce_state->dest_ring) {
- pci_free_consistent(ar_pci->pdev,
- (ce_state->dest_ring->nentries *
- sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 8eb7f99ed992..7a5a36fc59c1 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
- void **per_transfer_context;
+ /* keep last */
+ void *per_transfer_context[0];
};
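The per_transfer_context change above replaces a separately carved-out pointer table with a trailing array allocated together with the ring, matching the single kzalloc() in the alloc functions earlier in this patch. A minimal sketch of the idiom (the [0] spelling is the older GCC form of a flexible array member; names here are illustrative):

    struct ring_sketch {
        unsigned int nentries;
        /* keep last */
        void *ctx[];   /* flexible array member; contributes no size */
    };

    static struct ring_sketch *ring_alloc_sketch(unsigned int nentries)
    {
        struct ring_sketch *r;

        /* One allocation covers the header plus the pointer table. */
        r = kzalloc(sizeof(*r) + nentries * sizeof(r->ctx[0]), GFP_KERNEL);
        if (!r)
            return NULL;

        r->nentries = nentries;
        return r;
    }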
struct ath10k_ce_pipe {
@@ -159,6 +160,8 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int transfer_id,
unsigned int flags);
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
@@ -210,10 +213,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Engine Initialization=======================*/
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
/*
@@ -236,8 +241,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ebc5fc2ede75..82017f56e661 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -58,36 +58,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static int ath10k_init_connect_htc(struct ath10k *ar)
-{
- int status;
-
- status = ath10k_wmi_connect_htc_service(ar);
- if (status)
- goto conn_fail;
-
- /* Start HTC */
- status = ath10k_htc_start(&ar->htc);
- if (status)
- goto conn_fail;
-
- /* Wait for WMI event to be ready */
- status = ath10k_wmi_wait_for_service_ready(ar);
- if (status <= 0) {
- ath10k_warn("wmi service ready event not received");
- status = -ETIMEDOUT;
- goto timeout;
- }
-
- ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
- return 0;
-
-timeout:
- ath10k_htc_stop(&ar->htc);
-conn_fail:
- return status;
-}
-
static int ath10k_init_configure_target(struct ath10k *ar)
{
u32 param_host;
@@ -249,30 +219,40 @@ exit:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- u32 address = ar->hw_params.patch_load_addr;
- u32 exec_param;
+ u32 result, address = ar->hw_params.patch_load_addr;
int ret;
/* OTP is optional */
- if (!ar->otp_data || !ar->otp_len)
+ if (!ar->otp_data || !ar->otp_len) {
+ ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ar->otp_data, ar->otp_len);
return 0;
+ }
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->otp_len);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
- goto exit;
+ return ret;
}
- exec_param = 0;
- ret = ath10k_bmi_execute(ar, address, &exec_param);
+ ret = ath10k_bmi_execute(ar, address, 0, &result);
if (ret) {
ath10k_err("could not execute otp (%d)\n", ret);
- goto exit;
+ return ret;
}
-exit:
- return ret;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (result != 0) {
+ ath10k_err("otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +369,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
- ath10k_err("Could not fetch firmware file '%s': %ld\n",
- name, PTR_ERR(ar->firmware));
+ ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
+ ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
@@ -401,14 +381,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
- ath10k_err("firmware image too small to contain magic: %zu\n",
- len);
+ ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
+ ar->hw_params.fw.dir, name, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err("Invalid firmware magic\n");
+ ath10k_err("invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
@@ -430,7 +410,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += sizeof(*hdr);
if (len < ie_len) {
- ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+ ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
@@ -513,8 +493,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
}
if (!ar->firmware_data || !ar->firmware_len) {
- ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
- name);
+ ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
goto err;
}
@@ -531,7 +511,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
- ath10k_err("could not fetch board data (%d)\n", ret);
+ ath10k_err("could not fetch board data '%s/%s' (%d)\n",
+ ar->hw_params.fw.dir, ar->hw_params.fw.board,
+ ret);
goto err;
}
@@ -549,19 +531,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
+ ar->fw_api = 2;
+ ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
- if (ret == 0) {
- ar->fw_api = 2;
- goto out;
- }
+ if (ret == 0)
+ goto success;
+
+ ar->fw_api = 1;
+ ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
- ar->fw_api = 1;
-
-out:
+success:
ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
@@ -572,16 +556,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
int ret;
ret = ath10k_download_board_data(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to download board data: %d\n", ret);
return ret;
+ }
ret = ath10k_download_and_run_otp(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to run otp: %d\n", ret);
return ret;
+ }
ret = ath10k_download_fw(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to download firmware: %d\n", ret);
return ret;
+ }
return ret;
}
@@ -660,8 +650,9 @@ static void ath10k_core_restart(struct work_struct *work)
switch (ar->state) {
case ATH10K_STATE_ON:
- ath10k_halt(ar);
ar->state = ATH10K_STATE_RESTARTING;
+ del_timer_sync(&ar->scan.timeout);
+ ath10k_reset_scan((unsigned long)ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
@@ -670,6 +661,8 @@ static void ath10k_core_restart(struct work_struct *work)
ath10k_warn("cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
+ /* hw restart might be requested from multiple places */
+ break;
case ATH10K_STATE_RESTARTED:
ar->state = ATH10K_STATE_WEDGED;
/* fall through */
@@ -681,70 +674,6 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
- const struct ath10k_hif_ops *hif_ops)
-{
- struct ath10k *ar;
-
- ar = ath10k_mac_create();
- if (!ar)
- return NULL;
-
- ar->ath_common.priv = ar;
- ar->ath_common.hw = ar->hw;
-
- ar->p2p = !!ath10k_p2p;
- ar->dev = dev;
-
- ar->hif.priv = hif_priv;
- ar->hif.ops = hif_ops;
-
- init_completion(&ar->scan.started);
- init_completion(&ar->scan.completed);
- init_completion(&ar->scan.on_channel);
- init_completion(&ar->target_suspend);
-
- init_completion(&ar->install_key_done);
- init_completion(&ar->vdev_setup_done);
-
- setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
-
- ar->workqueue = create_singlethread_workqueue("ath10k_wq");
- if (!ar->workqueue)
- goto err_wq;
-
- mutex_init(&ar->conf_mutex);
- spin_lock_init(&ar->data_lock);
-
- INIT_LIST_HEAD(&ar->peers);
- init_waitqueue_head(&ar->peer_mapping_wq);
-
- init_completion(&ar->offchan_tx_completed);
- INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
- skb_queue_head_init(&ar->offchan_tx_queue);
-
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
-
- INIT_WORK(&ar->restart_work, ath10k_core_restart);
-
- return ar;
-
-err_wq:
- ath10k_mac_destroy(ar);
- return NULL;
-}
-EXPORT_SYMBOL(ath10k_core_create);
-
-void ath10k_core_destroy(struct ath10k *ar)
-{
- flush_workqueue(ar->workqueue);
- destroy_workqueue(ar->workqueue);
-
- ath10k_mac_destroy(ar);
-}
-EXPORT_SYMBOL(ath10k_core_destroy);
-
int ath10k_core_start(struct ath10k *ar)
{
int status;
@@ -785,10 +714,28 @@ int ath10k_core_start(struct ath10k *ar)
goto err;
}
+ status = ath10k_htt_init(ar);
+ if (status) {
+ ath10k_err("failed to init htt: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_tx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err("failed to alloc htt tx: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_rx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err("failed to alloc htt rx: %d\n", status);
+ goto err_htt_tx_detach;
+ }
+
status = ath10k_hif_start(ar);
if (status) {
ath10k_err("could not start HIF: %d\n", status);
- goto err_wmi_detach;
+ goto err_htt_rx_detach;
}
status = ath10k_htc_wait_target(&ar->htc);
@@ -797,15 +744,30 @@ int ath10k_core_start(struct ath10k *ar)
goto err_hif_stop;
}
- status = ath10k_htt_attach(ar);
+ status = ath10k_htt_connect(&ar->htt);
if (status) {
- ath10k_err("could not attach htt (%d)\n", status);
+ ath10k_err("failed to connect htt (%d)\n", status);
goto err_hif_stop;
}
- status = ath10k_init_connect_htc(ar);
- if (status)
- goto err_htt_detach;
+ status = ath10k_wmi_connect(ar);
+ if (status) {
+ ath10k_err("could not connect wmi: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_htc_start(&ar->htc);
+ if (status) {
+ ath10k_err("failed to start htc: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_wmi_wait_for_service_ready(ar);
+ if (status <= 0) {
+ ath10k_warn("wmi service ready event not received");
+ status = -ETIMEDOUT;
+ goto err_htc_stop;
+ }
ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
@@ -813,31 +775,36 @@ int ath10k_core_start(struct ath10k *ar)
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
- goto err_disconnect_htc;
+ goto err_htc_stop;
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
ath10k_err("wmi unified ready event not received\n");
status = -ETIMEDOUT;
- goto err_disconnect_htc;
+ goto err_htc_stop;
}
- status = ath10k_htt_attach_target(&ar->htt);
- if (status)
- goto err_disconnect_htc;
+ status = ath10k_htt_setup(&ar->htt);
+ if (status) {
+ ath10k_err("failed to setup htt: %d\n", status);
+ goto err_htc_stop;
+ }
status = ath10k_debug_start(ar);
if (status)
- goto err_disconnect_htc;
+ goto err_htc_stop;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
- ar->hw_params.name, ar->target_version,
- ar->hw->wiphy->fw_version, ar->fw_api,
+ ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+ ar->hw_params.name,
+ ar->target_version,
+ ar->chip_id,
+ ar->hw->wiphy->fw_version,
+ ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
@@ -845,12 +812,14 @@ int ath10k_core_start(struct ath10k *ar)
return 0;
-err_disconnect_htc:
+err_htc_stop:
ath10k_htc_stop(&ar->htc);
-err_htt_detach:
- ath10k_htt_detach(&ar->htt);
err_hif_stop:
ath10k_hif_stop(ar);
+err_htt_rx_detach:
+ ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+ ath10k_htt_tx_free(&ar->htt);
err_wmi_detach:
ath10k_wmi_detach(ar);
err:
@@ -885,10 +854,14 @@ void ath10k_core_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
/* try to suspend target */
- ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+ if (ar->state != ATH10K_STATE_RESTARTING)
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
- ath10k_htt_detach(&ar->htt);
+ ath10k_hif_stop(ar);
+ ath10k_htt_tx_free(&ar->htt);
+ ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
}
EXPORT_SYMBOL(ath10k_core_stop);
@@ -980,22 +953,15 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
return 0;
}
-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+static void ath10k_core_register_work(struct work_struct *work)
{
+ struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
- ar->chip_id = chip_id;
-
- status = ath10k_core_check_chip_id(ar);
- if (status) {
- ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
- return status;
- }
-
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
- return status;
+ goto err;
}
status = ath10k_mac_register(ar);
@@ -1010,18 +976,43 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id)
goto err_unregister_mac;
}
- return 0;
+ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+ return;
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
ath10k_core_free_firmware_files(ar);
- return status;
+err:
+ device_release_driver(ar->dev);
+ return;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+{
+ int status;
+
+ ar->chip_id = chip_id;
+
+ status = ath10k_core_check_chip_id(ar);
+ if (status) {
+ ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ return status;
+ }
+
+ queue_work(ar->workqueue, &ar->register_work);
+
+ return 0;
}
EXPORT_SYMBOL(ath10k_core_register);
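Registration is now two-phase: ath10k_core_register() keeps only the cheap chip-id check synchronous and defers the slow firmware probe to register_work, while ath10k_core_unregister() cancels the work first and tears down only if ATH10K_FLAG_CORE_REGISTERED was actually set. A hedged userspace model of that handoff, with pthreads standing in for the kernel workqueue:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool core_registered; /* models ATH10K_FLAG_CORE_REGISTERED */

/* Models register_work: the slow probe runs off the caller's context
 * and sets the flag only once every step has succeeded. */
static void *register_work(void *arg)
{
	(void)arg;
	puts("probing firmware...");
	core_registered = true;
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* register(): cheap validation only, then queue the work. */
	if (pthread_create(&worker, NULL, register_work, NULL))
		return 1;

	/* unregister(): flush the worker, then tear down only if the
	 * deferred registration actually completed. */
	pthread_join(worker, NULL);
	if (core_registered)
		puts("unregister: tearing down mac80211 state");
	return 0;
}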
void ath10k_core_unregister(struct ath10k *ar)
{
+ cancel_work_sync(&ar->register_work);
+
+ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ return;
+
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
@@ -1033,6 +1024,71 @@ void ath10k_core_unregister(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_unregister);
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+ const struct ath10k_hif_ops *hif_ops)
+{
+ struct ath10k *ar;
+
+ ar = ath10k_mac_create();
+ if (!ar)
+ return NULL;
+
+ ar->ath_common.priv = ar;
+ ar->ath_common.hw = ar->hw;
+
+ ar->p2p = !!ath10k_p2p;
+ ar->dev = dev;
+
+ ar->hif.priv = hif_priv;
+ ar->hif.ops = hif_ops;
+
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
+
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->vdev_setup_done);
+
+ setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
+
+ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+ if (!ar->workqueue)
+ goto err_wq;
+
+ mutex_init(&ar->conf_mutex);
+ spin_lock_init(&ar->data_lock);
+
+ INIT_LIST_HEAD(&ar->peers);
+ init_waitqueue_head(&ar->peer_mapping_wq);
+
+ init_completion(&ar->offchan_tx_completed);
+ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+ skb_queue_head_init(&ar->offchan_tx_queue);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ INIT_WORK(&ar->register_work, ath10k_core_register_work);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
+ return ar;
+
+err_wq:
+ ath10k_mac_destroy(ar);
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+ flush_workqueue(ar->workqueue);
+ destroy_workqueue(ar->workqueue);
+
+ ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
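The new create/destroy pair gives bus glue code a single entry point for allocating the driver context before it touches hardware. A hedged sketch of how a HIF transport's probe path might use it; example_hif_probe and its parameters are illustrative stand-ins, not code from this patch:

/* Sketch only -- assumes the ath10k core API shown above. */
#include "core.h"

static int example_hif_probe(struct device *dev, void *hif_priv,
			     const struct ath10k_hif_ops *ops, u32 chip_id)
{
	struct ath10k *ar;
	int ret;

	ar = ath10k_core_create(hif_priv, dev, ops);
	if (!ar)
		return -ENOMEM;

	/* Now asynchronous: this validates the chip id and queues
	 * register_work; the firmware probe happens later. */
	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_core_destroy(ar);
		return ret;
	}

	return 0;
}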
+
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0e71979d837c..68ceef61933d 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
+ u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
/* PDEV TX stats */
s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
+ bool use_cts_prot;
+ int num_legacy_stations;
};
struct ath10k_vif_iter {
@@ -326,6 +335,7 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_FIRST_BOOT_DONE,
+ ATH10K_FLAG_CORE_REGISTERED,
};
struct ath10k {
@@ -419,13 +429,24 @@ struct ath10k {
struct cfg80211_chan_def chandef;
int free_vdev_map;
+ bool promisc;
+ bool monitor;
int monitor_vdev_id;
- bool monitor_enabled;
- bool monitor_present;
+ bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
u32 dfs_block_radar_events;
+ /* protected by conf_mutex */
+ bool radar_enabled;
+ int num_started_vdevs;
+
+ /* Protected by conf-mutex */
+ u8 supp_tx_chainmask;
+ u8 supp_rx_chainmask;
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
@@ -456,6 +477,7 @@ struct ath10k {
enum ath10k_state state;
+ struct work_struct register_work;
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6debd281350a..1b7ff4ba122c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
u8 *tmp = ev->data;
struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats;
- struct wmi_pdev_stats *ps;
+ struct wmi_pdev_stats_10x *ps;
int i;
spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) {
- ps = (struct wmi_pdev_stats *)tmp;
+ ps = (struct wmi_pdev_stats_10x *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
- tmp += sizeof(struct wmi_pdev_stats);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+ ar->fw_features)) {
+ stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
+ stats->rts_bad = __le32_to_cpu(ps->rts_bad);
+ stats->rts_good = __le32_to_cpu(ps->rts_good);
+ stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
+ stats->no_beacons = __le32_to_cpu(ps->no_beacons);
+ stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
+ tmp += sizeof(struct wmi_pdev_stats_10x);
+ } else {
+ tmp += sizeof(struct wmi_pdev_stats_old);
+ }
}
/* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
if (num_peer_stats) {
- struct wmi_peer_stats *peer_stats;
+ struct wmi_peer_stats_10x *peer_stats;
struct ath10k_peer_stat *s;
stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) {
- peer_stats = (struct wmi_peer_stats *)tmp;
+ peer_stats = (struct wmi_peer_stats_10x *)tmp;
s = &stats->peer_stat[i];
- WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
- s->peer_macaddr);
+ memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
+ ETH_ALEN);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate);
-
- tmp += sizeof(struct wmi_peer_stats);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+ ar->fw_features)) {
+ s->peer_rx_rate =
+ __le32_to_cpu(peer_stats->peer_rx_rate);
+ tmp += sizeof(struct wmi_peer_stats_10x);
+
+ } else {
+ tmp += sizeof(struct wmi_peer_stats_old);
+ }
}
}
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
char *buf = NULL;
- unsigned int len = 0, buf_len = 2500;
+ unsigned int len = 0, buf_len = 8000;
ssize_t ret_cnt = 0;
long left;
int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"Cycle count", fw_stats->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", fw_stats->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", fw_stats->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", fw_stats->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", fw_stats->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", fw_stats->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s\n",
- "ath10k PEER stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
+ "ath10k PEER stats", fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate",
+ fw_stats->peer_stat[i].peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- const char buf[] = "To simulate firmware crash write the keyword"
- " `crash` to this file.\nThis will force firmware"
- " to report a crash to the host system.\n";
+ const char buf[] = "To simulate firmware crash write one of the"
+ " keywords to this file:\n `soft` - this will send"
+ " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
+ " supports that command.\n `hard` - this will send"
+ " to firmware command with illegal parameters"
+ " causing firmware crash.\n";
+
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
+/* Simulate firmware crash:
+ * 'soft': Call a wmi command causing a firmware hang. This hang is
+ * recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter for a
+ * disallowed vdev id. This is a hard crash because it is recoverable
+ * only by a cold firmware reset.
+ */
static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32] = {};
+ char buf[32];
int ret;
mutex_lock(&ar->conf_mutex);
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
- if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
- ret = -EINVAL;
- goto exit;
- }
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
goto exit;
}
- ath10k_info("simulating firmware crash\n");
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n') {
+ buf[count - 1] = 0;
+ count--;
+ }
- ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
- if (ret)
- ath10k_warn("failed to force fw hang (%d)\n", ret);
+ if (!strcmp(buf, "soft")) {
+ ath10k_info("simulating soft firmware crash\n");
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ } else if (!strcmp(buf, "hard")) {
+ ath10k_info("simulating hard firmware crash\n");
+ ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
+ ar->wmi.vdev_param->rts_threshold, 0);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath10k_warn("failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
- if (ret == 0)
- ret = count;
+ ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 7f1bccd3597f..e493db4b4a41 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
goto err_pull;
}
ep->tx_credits -= credits;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
@@ -185,6 +188,9 @@ err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
- ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
- report->eid, report->credits);
-
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
+ ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
@@ -824,17 +830,11 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return 0;
}
-/*
- * stop HTC communications, i.e. stop interrupt reception, and flush all
- * queued buffers
- */
void ath10k_htc_stop(struct ath10k_htc *htc)
{
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
-
- ath10k_hif_stop(htc->ar);
}
/* registered target arrival callback from the HIF layer */
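The added debug lines make the credit lifecycle visible: credits are consumed before a frame is handed down, reverted if the lower layer rejects it, and replenished by credit reports from the target. A toy standalone model of that accounting (not driver code):

#include <stdio.h>

struct ep { int tx_credits; };

static int lower_tx(int fail) { return fail ? -1 : 0; }

static int htc_send(struct ep *ep, int credits, int fail)
{
	if (ep->tx_credits < credits)
		return -1; /* the real driver queues or blocks here */
	ep->tx_credits -= credits;
	printf("consumed %d (total %d)\n", credits, ep->tx_credits);

	if (lower_tx(fail)) {
		ep->tx_credits += credits; /* revert on error */
		printf("reverted %d (total %d)\n", credits, ep->tx_credits);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ep ep = { .tx_credits = 4 };

	htc_send(&ep, 2, 0); /* ok */
	htc_send(&ep, 2, 1); /* lower layer fails, credits restored */
	return 0;
}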
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 69697af59ce0..19c12cc8d663 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,7 +22,7 @@
#include "core.h"
#include "debug.h"
-static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
+int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
@@ -48,39 +48,14 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_init(struct ath10k *ar)
{
struct ath10k_htt *htt = &ar->htt;
- int ret;
htt->ar = ar;
htt->max_throughput_mbps = 800;
/*
- * Connect to HTC service.
- * This has to be done before calling ath10k_htt_rx_attach,
- * since ath10k_htt_rx_attach involves sending a rx ring configure
- * message to the target.
- */
- ret = ath10k_htt_htc_attach(htt);
- if (ret) {
- ath10k_err("could not attach htt htc (%d)\n", ret);
- goto err_htc_attach;
- }
-
- ret = ath10k_htt_tx_attach(htt);
- if (ret) {
- ath10k_err("could not attach htt tx (%d)\n", ret);
- goto err_htc_attach;
- }
-
- ret = ath10k_htt_rx_attach(htt);
- if (ret) {
- ath10k_err("could not attach htt rx (%d)\n", ret);
- goto err_rx_attach;
- }
-
- /*
* Prefetch enough data to satisfy target
* classification engine.
* This is for LL chips. HL chips will probably
@@ -93,11 +68,6 @@ int ath10k_htt_attach(struct ath10k *ar)
2; /* ip4 dscp or ip6 priority */
return 0;
-
-err_rx_attach:
- ath10k_htt_tx_detach(htt);
-err_htc_attach:
- return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -117,7 +87,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_attach_target(struct ath10k_htt *htt)
+int ath10k_htt_setup(struct ath10k_htt *htt)
{
int status;
@@ -140,9 +110,3 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt)
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}
-
-void ath10k_htt_detach(struct ath10k_htt *htt)
-{
- ath10k_htt_rx_detach(htt);
- ath10k_htt_tx_detach(htt);
-}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 654867fc1ae7..9a263462c793 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
+#include <net/mac80211.h>
#include "htc.h"
#include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
-struct htt_rx_info {
- struct sk_buff *skb;
- enum htt_rx_mpdu_status status;
- enum htt_rx_mpdu_encrypt_type encrypt_type;
- s8 signal;
- struct {
- u8 info0;
- u32 info1;
- u32 info2;
- } rate;
-
- u32 tsf;
- bool fcs_err;
- bool amsdu_more;
- bool mic_err;
-};
-
struct ath10k_htt_txbuf {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
+
+ /* rx_status template */
+ struct ieee80211_rx_status rx_status;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1341,14 +1328,16 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
-int ath10k_htt_attach(struct ath10k *ar);
-int ath10k_htt_attach_target(struct ath10k_htt *htt);
-void ath10k_htt_detach(struct ath10k_htt *htt);
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
-int ath10k_htt_tx_attach(struct ath10k_htt *htt);
-void ath10k_htt_tx_detach(struct ath10k_htt *htt);
-int ath10k_htt_rx_attach(struct ath10k_htt *htt);
-void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
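Taken together, the header changes split the old monolithic entry points by lifecycle stage: ath10k_htt_attach() becomes ath10k_htt_init() plus ath10k_htt_connect() plus the ath10k_htt_tx_alloc()/ath10k_htt_rx_alloc() pair, ath10k_htt_attach_target() becomes ath10k_htt_setup(), and ath10k_htt_detach() becomes ath10k_htt_tx_free()/ath10k_htt_rx_free(). That split is what allows core.c to allocate the HTT DMA resources before starting HIF and to free them without tearing down HTC.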
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index cdcbe2de95f9..6c102b1312ff 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
- int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < htt->rx_ring.size; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ if (!skb)
+ continue;
+ dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ htt->rx_ring.netbufs_ring[i] = NULL;
+ }
+}
+
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
tasklet_kill(&htt->txrx_compl_task);
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
- while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
- struct sk_buff *skb =
- htt->rx_ring.netbufs_ring[sw_rd_idx];
- struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-
- dma_unmap_single(htt->ar->dev, cb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
- sw_rd_idx++;
- sw_rd_idx &= htt->rx_ring.size_mask;
- }
+ ath10k_htt_rx_ring_clean_up(htt);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
+ htt->rx_ring.netbufs_ring[idx] = NULL;
idx++;
idx &= htt->rx_ring.size_mask;
@@ -297,6 +303,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
}
}
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
struct sk_buff **head_msdu,
@@ -305,12 +312,13 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+ bool corrupted = false;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
- return 0;
+ return -1;
}
msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -398,7 +406,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
- msdu_chaining = msdu_chained;
if (msdu_len_invalid)
msdu_len = 0;
@@ -426,11 +433,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->next = next;
msdu = next;
+ msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
+ if (msdu_chaining && !last_msdu)
+ corrupted = true;
+
if (last_msdu) {
msdu->next = NULL;
break;
@@ -442,6 +453,23 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
*tail_msdu = msdu;
+ if (*head_msdu == NULL)
+ msdu_chaining = -1;
+
+ /*
+ * Apparently FW sometimes reports weird chained MSDU sequences with
+ * more than one rx descriptor. This looks like a firmware bug but
+ * needs further analysis. For the time being work around it by
+ * dropping such sequences, to avoid blowing up the host system.
+ */
+ if (corrupted) {
+ ath10k_warn("failed to pop chained msdus, dropping\n");
+ ath10k_htt_rx_free_msdu_chain(*head_msdu);
+ *head_msdu = NULL;
+ *tail_msdu = NULL;
+ msdu_chaining = -EINVAL;
+ }
+
/*
* Don't refill the ring yet.
*
@@ -464,7 +492,7 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
dma_addr_t paddr;
void *vaddr;
@@ -490,7 +518,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
htt->rx_ring.netbufs_ring =
- kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+ kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
GFP_KERNEL);
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
@@ -636,6 +664,203 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
+static const u8 rx_legacy_rate_idx[] = {
+ 3, /* 0x00 - 11Mbps */
+ 2, /* 0x01 - 5.5Mbps */
+ 1, /* 0x02 - 2Mbps */
+ 0, /* 0x03 - 1Mbps */
+ 3, /* 0x04 - 11Mbps */
+ 2, /* 0x05 - 5.5Mbps */
+ 1, /* 0x06 - 2Mbps */
+ 0, /* 0x07 - 1Mbps */
+ 10, /* 0x08 - 48Mbps */
+ 8, /* 0x09 - 24Mbps */
+ 6, /* 0x0A - 12Mbps */
+ 4, /* 0x0B - 6Mbps */
+ 11, /* 0x0C - 54Mbps */
+ 9, /* 0x0D - 36Mbps */
+ 7, /* 0x0E - 18Mbps */
+ 5, /* 0x0F - 9Mbps */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ enum ieee80211_band band,
+ u8 info0, u32 info1, u32 info2,
+ struct ieee80211_rx_status *status)
+{
+ u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ u8 preamble = 0;
+
+ /* Check if valid fields */
+ if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+ return;
+
+ preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+ cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+ rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+ rate_idx = 0;
+
+ if (rate < 0x08 || rate > 0x0F)
+ break;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ if (cck)
+ rate &= ~BIT(3);
+ rate_idx = rx_legacy_rate_idx[rate];
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate_idx = rx_legacy_rate_idx[rate];
+ /* We use the same rate table as the one
+ registered with the HW - ath10k_rates[].
+ 5GHz has no CCK rates, so subtract 4 */
+ rate_idx -= 4;
+ break;
+ default:
+ break;
+ }
+
+ status->rate_idx = rate_idx;
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info1 and info2 */
+ mcs = info1 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info1 >> 7) & 1;
+ sgi = (info2 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->flag |= RX_FLAG_HT;
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+ if (bw)
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+ /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2;
+ TODO: check this */
+ mcs = (info2 >> 4) & 0x0F;
+ nss = ((info1 >> 10) & 0x07) + 1;
+ bw = info1 & 3;
+ sgi = info2 & 1;
+
+ status->rate_idx = mcs;
+ status->vht_nss = nss;
+
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ switch (bw) {
+ /* 20MHZ */
+ case 0:
+ break;
+ /* 40MHZ */
+ case 1:
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ /* 80MHZ */
+ case 2:
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ }
+
+ status->flag |= RX_FLAG_VHT;
+ break;
+ default:
+ break;
+ }
+}
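The legacy table folds CCK and OFDM rate codes onto the index order in which the driver registers its rates, and the 5 GHz branch then skips the four CCK entries. A standalone worked example of the lookup; the -4 adjustment mirrors the comment above:

#include <stdio.h>

/* Same table as in the patch: hardware legacy rate codes 0x00-0x0F
 * mapped to indices into the registered rate list (4 CCK rates
 * first, then 8 OFDM rates). */
static const unsigned char rx_legacy_rate_idx[] = {
	3, 2, 1, 0, 3, 2, 1, 0, 10, 8, 6, 4, 11, 9, 7, 5,
};

int main(void)
{
	unsigned rate = 0x0B; /* OFDM 6 Mbps code from the rx descriptor */

	/* 2 GHz: index straight into the combined CCK+OFDM table. */
	printf("2GHz rate_idx = %u\n", (unsigned)rx_legacy_rate_idx[rate]);
	/* 5 GHz: the band has no CCK entries, so shift past them (-4). */
	printf("5GHz rate_idx = %u\n", (unsigned)rx_legacy_rate_idx[rate] - 4);
	return 0;
}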
+
+static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ enum rx_msdu_decap_format fmt,
+ bool dot11frag)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
+ return;
+
+ /*
+ * There's no explicit rx descriptor flag to indicate whether a given
+ * frame has been decrypted or not. We're forced to use the decap
+ * format as an implicit indication. However fragmentation rx is always
+ * raw and it probably never reports undecrypted raw frames.
+ *
+ * This makes sure sniffed frames are reported as-is without stripping
+ * the protected flag.
+ */
+ if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
+ return;
+
+ rx_status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+ ~IEEE80211_FCTL_PROTECTED);
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_channel *ch;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch)
+ return false;
+
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ return true;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ ath10k_dbg(ATH10K_DBG_DATA,
+ "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
+ skb,
+ skb->len,
+ status->flag == 0 ? "legacy" : "",
+ status->flag & RX_FLAG_HT ? "ht" : "",
+ status->flag & RX_FLAG_VHT ? "vht" : "",
+ status->flag & RX_FLAG_40MHZ ? "40" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+ status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->vht_nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR));
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
+
+ ieee80211_rx(ar->hw, skb);
+}
+
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@@ -643,11 +868,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
}
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb_in)
{
struct htt_rx_desc *rxd;
+ struct sk_buff *skb = skb_in;
struct sk_buff *first;
- struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
@@ -728,24 +954,28 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
break;
}
- info->skb = skb;
- info->encrypt_type = enctype;
+ skb_in = skb;
+ ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
+ false);
skb = skb->next;
- info->skb->next = NULL;
+ skb_in->next = NULL;
if (skb)
- info->amsdu_more = true;
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+ else
+ rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
- ath10k_process_rx(htt->ar, info);
+ ath10k_process_rx(htt->ar, rx_status, skb_in);
}
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */
}
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
{
- struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
@@ -808,66 +1038,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
break;
}
- info->skb = skb;
- info->encrypt_type = enctype;
+ ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
- ath10k_process_rx(htt->ar, info);
-}
-
-static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
- return true;
-
- return false;
+ ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1125,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
return 0;
}
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
+ struct sk_buff *head,
+ enum htt_rx_mpdu_status status,
+ bool channel_set,
+ u32 attention)
+{
+ if (head->len == 0) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to zero-len\n");
+ return false;
+ }
+
+ if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to decrypt-err\n");
+ return false;
+ }
+
+ if (!channel_set) {
+ ath10k_warn("no channel configured; ignoring frame!\n");
+ return false;
+ }
+
+ /* Skip mgmt frames while we handle this in WMI */
+ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+ attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ return false;
+ }
+
+ if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+ status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+ status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
+ !htt->ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx ignoring frame w/ status %d\n",
+ status);
+ return false;
+ }
+
+ if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx CAC running\n");
+ return false;
+ }
+
+ return true;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
- struct htt_rx_info info;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct htt_rx_desc *rxd;
+ enum htt_rx_mpdu_status status;
struct ieee80211_hdr *hdr;
int num_mpdu_ranges;
+ u32 attention;
int fw_desc_len;
u8 *fw_desc;
+ bool channel_set;
int i, j;
+ int ret;
lockdep_assert_held(&htt->rx_ring.lock);
- memset(&info, 0, sizeof(info));
-
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc;
@@ -974,106 +1199,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+ /* Fill this once, while this is per-ppdu */
+ if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rx->ppdu.combined_rssi;
+ }
+
+ if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+ /* TSF available only in 32-bit */
+ rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+ }
+
+ channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
+
+ if (channel_set) {
+ ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
+ rx->ppdu.info0,
+ __le32_to_cpu(rx->ppdu.info1),
+ __le32_to_cpu(rx->ppdu.info2),
+ rx_status);
+ }
+
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++) {
- info.status = mpdu_ranges[i].mpdu_range_status;
+ status = mpdu_ranges[i].mpdu_range_status;
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail;
- enum htt_rx_mpdu_status status;
- int msdu_chaining;
msdu_head = NULL;
msdu_tail = NULL;
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
- &fw_desc,
- &fw_desc_len,
- &msdu_head,
- &msdu_tail);
-
- if (!msdu_head) {
- ath10k_warn("htt rx no data!\n");
- continue;
- }
-
- if (msdu_head->len == 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to zero-len\n");
+ ret = ath10k_htt_rx_amsdu_pop(htt,
+ &fw_desc,
+ &fw_desc_len,
+ &msdu_head,
+ &msdu_tail);
+
+ if (ret < 0) {
+ ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+ ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to decrypt-err\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
+ rxd = container_of((void *)msdu_head->data,
+ struct htt_rx_desc,
+ msdu_payload);
+ attention = __le32_to_cpu(rxd->attention.flags);
- status = info.status;
-
- /* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
- ath10k_htt_rx_is_mgmt(msdu_head)) {
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
+ status,
+ channel_set,
+ attention)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
- !htt->ar->monitor_enabled) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx ignoring frame w/ status %d\n",
- status);
+ if (ret > 0 &&
+ ath10k_unchain_msdu(msdu_head) < 0) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx CAC running\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (msdu_chaining &&
- (ath10k_unchain_msdu(msdu_head) < 0)) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- info.skb = msdu_head;
- info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
- info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
-
- if (info.fcs_err)
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx has FCS err\n");
-
- if (info.mic_err)
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx has MIC err\n");
-
- info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
- info.signal += rx->ppdu.combined_rssi;
+ if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ else
+ rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
- info.rate.info0 = rx->ppdu.info0;
- info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
- info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
- info.tsf = __le32_to_cpu(rx->ppdu.tsf);
+ if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ else
+ rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
else
- ath10k_htt_rx_msdu(htt, &info);
+ ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
}
}
@@ -1084,11 +1293,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct sk_buff *msdu_head, *msdu_tail;
+ enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format fmt;
- struct htt_rx_info info = {};
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct ieee80211_hdr *hdr;
- int msdu_chaining;
+ int ret;
bool tkip_mic_err;
bool decrypt_err;
u8 *fw_desc;
@@ -1102,24 +1312,21 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_tail = NULL;
spin_lock_bh(&htt->rx_ring.lock);
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &msdu_head, &msdu_tail);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+ &msdu_head, &msdu_tail);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
- if (!msdu_head) {
- ath10k_warn("htt rx frag no data\n");
- return;
- }
-
- if (msdu_chaining || msdu_head != msdu_tail) {
- ath10k_warn("aggregation with fragmentation?!\n");
+ if (ret) {
+ ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+ ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
}
/* FIXME: implement signal strength */
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
hdr = (struct ieee80211_hdr *)msdu_head->data;
rxd = (void *)msdu_head->data - sizeof(*rxd);
@@ -1136,57 +1343,55 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
goto end;
}
- info.skb = msdu_head;
- info.status = HTT_RX_IND_MPDU_STATUS_OK;
- info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
+ true);
+ msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
- if (tkip_mic_err) {
+ if (tkip_mic_err)
ath10k_warn("tkip mic error\n");
- info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
- }
if (decrypt_err) {
ath10k_warn("decryption err in fragmented rx\n");
- dev_kfree_skb_any(info.skb);
+ dev_kfree_skb_any(msdu_head);
goto end;
}
- if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+ if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+ paramlen = ath10k_htt_rx_crypto_param_len(enctype);
/* It is more efficient to move the header than the payload */
- memmove((void *)info.skb->data + paramlen,
- (void *)info.skb->data,
+ memmove((void *)msdu_head->data + paramlen,
+ (void *)msdu_head->data,
hdrlen);
- skb_pull(info.skb, paramlen);
- hdr = (struct ieee80211_hdr *)info.skb->data;
+ skb_pull(msdu_head, paramlen);
+ hdr = (struct ieee80211_hdr *)msdu_head->data;
}
/* remove trailing FCS */
trim = 4;
/* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+ trim += ath10k_htt_rx_crypto_tail_len(enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
- info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
trim += 8;
- if (trim > info.skb->len) {
+ if (trim > msdu_head->len) {
ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
- dev_kfree_skb_any(info.skb);
+ dev_kfree_skb_any(msdu_head);
goto end;
}
- skb_trim(info.skb, info.skb->len - trim);
+ skb_trim(msdu_head, msdu_head->len - trim);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ msdu_head->data, msdu_head->len);
+ ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7a3e2e40dd5c..7064354d1f4f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -83,7 +83,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
__clear_bit(msdu_id, htt->used_msdu_ids);
}
-int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
@@ -120,7 +120,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
struct htt_tx_done tx_done = {0};
int msdu_id;
@@ -141,9 +141,9 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
spin_unlock_bh(&htt->tx_lock);
}
-void ath10k_htt_tx_detach(struct ath10k_htt *htt)
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
- ath10k_htt_tx_cleanup_pending(htt);
+ ath10k_htt_tx_free_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35fc44e281f5..007e855f4ba9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -28,6 +28,7 @@
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
+#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 511a2f81e7af..a21080028c54 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -54,7 +54,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ else
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_TKIP:
arg.key_cipher = WMI_CIPHER_TKIP;
@@ -165,7 +168,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
first_errno = ret;
if (ret)
- ath10k_warn("could not remove peer wep key %d (%d)\n",
+ ath10k_warn("failed to remove peer wep key %d: %d\n",
i, ret);
peer->keys[i] = NULL;
@@ -213,7 +216,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
first_errno = ret;
if (ret)
- ath10k_warn("could not remove key for %pM\n", addr);
+ ath10k_warn("failed to remove key for %pM: %d\n",
+ addr, ret);
}
return first_errno;
@@ -323,14 +327,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
@@ -351,7 +355,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_pdev_set_param(ar, param,
ATH10K_KICKOUT_THRESHOLD);
if (ret) {
- ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+ ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -360,7 +364,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MIN_IDLE);
if (ret) {
- ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+ ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -369,7 +373,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_IDLE);
if (ret) {
- ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+ ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -378,7 +382,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
- ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -488,92 +492,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return 0;
}
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static bool ath10k_monitor_is_enabled(struct ath10k *ar)
{
- struct ath10k *ar = arvif->ar;
- struct cfg80211_chan_def *chandef = &ar->chandef;
- struct wmi_vdev_start_request_arg arg = {};
- int ret = 0;
-
lockdep_assert_held(&ar->conf_mutex);
- reinit_completion(&ar->vdev_setup_done);
-
- arg.vdev_id = arvif->vdev_id;
- arg.dtim_period = arvif->dtim_period;
- arg.bcn_intval = arvif->beacon_interval;
-
- arg.channel.freq = chandef->chan->center_freq;
- arg.channel.band_center_freq1 = chandef->center_freq1;
- arg.channel.mode = chan_to_phymode(chandef);
-
- arg.channel.min_power = 0;
- arg.channel.max_power = chandef->chan->max_power * 2;
- arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- arg.ssid = arvif->u.ap.ssid;
- arg.ssid_len = arvif->u.ap.ssid_len;
- arg.hidden_ssid = arvif->u.ap.hidden_ssid;
-
- /* For now allow DFS for AP mode */
- arg.channel.chan_radar =
- !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
- } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- arg.ssid = arvif->vif->bss_conf.ssid;
- arg.ssid_len = arvif->vif->bss_conf.ssid_len;
- }
-
ath10k_dbg(ATH10K_DBG_MAC,
- "mac vdev %d start center_freq %d phymode %s\n",
- arg.vdev_id, arg.channel.freq,
- ath10k_wmi_phymode_str(arg.channel.mode));
+ "mac monitor refs: promisc %d monitor %d cac %d\n",
+ ar->promisc, ar->monitor,
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
- ret = ath10k_wmi_vdev_start(ar, &arg);
- if (ret) {
- ath10k_warn("WMI vdev %i start failed: ret %d\n",
- arg.vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn("vdev %i setup failed %d\n",
- arg.vdev_id, ret);
- return ret;
- }
-
- return ret;
+ return ar->promisc || ar->monitor ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
-{
- struct ath10k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn("WMI vdev %i stop failed: ret %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn("vdev %i setup sync failed %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- return ret;
-}
-
-static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = &ar->chandef;
struct ieee80211_channel *channel = chandef->chan;
@@ -582,11 +514,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present) {
- ath10k_warn("mac montor stop -- monitor is not present\n");
- return -EINVAL;
- }
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +531,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+ ath10k_warn("failed to request monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("Monitor vdev %i setup failed %d\n",
+ ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
- ath10k_warn("Monitor vdev %i up failed: %d\n",
+ ath10k_warn("failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
goto vdev_stop;
}
ar->monitor_vdev_id = vdev_id;
- ar->monitor_enabled = true;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ar->monitor_vdev_id);
return 0;
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
-static int ath10k_monitor_stop(struct ath10k *ar)
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present) {
- ath10k_warn("mac montor stop -- monitor is not present\n");
- return -EINVAL;
- }
-
- if (!ar->monitor_enabled) {
- ath10k_warn("mac montor stop -- monitor is not enabled\n");
- return -EINVAL;
- }
-
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i down failed: %d\n",
+ ath10k_warn("failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+ ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
- ar->monitor_enabled = false;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
return ret;
}
-static int ath10k_monitor_create(struct ath10k *ar)
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
int bit, ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (ar->monitor_present) {
- ath10k_warn("Monitor mode already enabled\n");
- return 0;
- }
-
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
- ath10k_warn("No free VDEV slots\n");
+ ath10k_warn("failed to find free vdev id for monitor vdev\n");
return -ENOMEM;
}
@@ -696,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
- ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+ ath10k_warn("failed to request monitor vdev %i creation: %d\n",
ar->monitor_vdev_id, ret);
goto vdev_fail;
}
@@ -704,7 +618,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
- ar->monitor_present = true;
return 0;
vdev_fail:
@@ -715,48 +628,123 @@ vdev_fail:
return ret;
}
-static int ath10k_monitor_destroy(struct ath10k *ar)
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present)
- return 0;
-
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+ ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
- ar->monitor_present = false;
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
-static int ath10k_start_cac(struct ath10k *ar)
+static int ath10k_monitor_start(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
- set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ if (!ath10k_monitor_is_enabled(ar)) {
+ ath10k_warn("trying to start monitor with no references\n");
+ return 0;
+ }
+
+ if (ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+ return 0;
+ }
- ret = ath10k_monitor_create(ar);
+ ret = ath10k_monitor_vdev_create(ar);
if (ret) {
- clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_warn("failed to create monitor vdev: %d\n", ret);
return ret;
}
- ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn("failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+}
+
+static void ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath10k_monitor_is_enabled(ar)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac monitor will be stopped later\n");
+ return;
+ }
+
+ if (!ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac monitor probably failed to start earlier\n");
+ return;
+ }
+
+ ret = ath10k_monitor_vdev_stop(ar);
+ if (ret)
+ ath10k_warn("failed to stop monitor vdev: %d\n", ret);
+
+ ret = ath10k_monitor_vdev_delete(ar);
+ if (ret)
+ ath10k_warn("failed to delete monitor vdev: %d\n", ret);
+
+ ar->monitor_started = false;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
+}
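Monitor start/stop is now arbitrated by implicit references rather than the old monitor_present/monitor_enabled pair: the vdev stays up while promiscuous mode, a monitor interface, or a running CAC needs it. A toy standalone model of that arbitration:

#include <stdbool.h>
#include <stdio.h>

struct state {
	bool promisc, monitor, cac_running, started;
};

static bool monitor_is_enabled(const struct state *s)
{
	return s->promisc || s->monitor || s->cac_running;
}

/* Bring the monitor vdev up or down to match the reference state. */
static void monitor_recalc(struct state *s)
{
	if (monitor_is_enabled(s) && !s->started) {
		s->started = true;
		puts("monitor started");
	} else if (!monitor_is_enabled(s) && s->started) {
		s->started = false;
		puts("monitor stopped");
	}
}

int main(void)
{
	struct state s = { 0 };

	s.cac_running = true;  monitor_recalc(&s); /* CAC brings it up */
	s.promisc = true;      monitor_recalc(&s); /* still up, no-op */
	s.cac_running = false; monitor_recalc(&s); /* promisc holds it */
	s.promisc = false;     monitor_recalc(&s); /* last ref gone */
	return 0;
}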
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+ if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+ WMI_RTSCTS_PROFILE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_start(ar);
if (ret) {
+ ath10k_warn("failed to start monitor (cac): %d\n", ret);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
- ath10k_monitor_destroy(ar);
return ret;
}
@@ -774,58 +762,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
return 0;
- ath10k_monitor_stop(ar);
- ath10k_monitor_destroy(ar);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
return 0;
}
-static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
- switch (dfs_state) {
- case NL80211_DFS_USABLE:
- return "USABLE";
- case NL80211_DFS_UNAVAILABLE:
- return "UNAVAILABLE";
- case NL80211_DFS_AVAILABLE:
- return "AVAILABLE";
- default:
- WARN_ON(1);
- return "bug";
- }
-}
-
-static void ath10k_config_radar_detection(struct ath10k *ar)
-{
- struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
- bool radar = ar->hw->conf.radar_enabled;
- bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
- enum nl80211_dfs_state dfs_state = chan->dfs_state;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC,
- "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
- chan->center_freq, radar, chan_radar,
- ath10k_dfs_state(dfs_state));
-
- /*
- * It's safe to call it even if CAC is not started.
- * This call here guarantees changing channel, etc. will stop CAC.
- */
ath10k_stop_cac(ar);
- if (!radar)
- return;
-
- if (!chan_radar)
+ if (!ar->radar_enabled)
return;
- if (dfs_state != NL80211_DFS_USABLE)
+ if (ar->num_started_vdevs > 0)
return;
ret = ath10k_start_cac(ar);
@@ -835,11 +791,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
* radiation is not allowed, make this channel DFS_UNAVAILABLE
* by indicating that radar was detected.
*/
- ath10k_warn("failed to start CAC (%d)\n", ret);
+ ath10k_warn("failed to start CAC: %d\n", ret);
ieee80211_radar_detected(ar->hw);
}
}
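Radar detection is likewise recalculated from state instead of being configured per channel change: CAC restarts only when radar detection is enabled and no vdev has started yet, since a started vdev implies CAC already completed. A small standalone model of that gating:

#include <stdbool.h>
#include <stdio.h>

static void recalc_radar(bool radar_enabled, int num_started_vdevs)
{
	/* Safe even if CAC is not running, mirroring ath10k_stop_cac(). */
	puts("stop CAC");
	if (!radar_enabled)
		return;
	if (num_started_vdevs > 0)
		return; /* an operating vdev means CAC already passed */
	puts("start CAC");
}

int main(void)
{
	recalc_radar(true, 0);  /* fresh DFS channel: CAC starts */
	recalc_radar(true, 1);  /* vdev already started: no CAC */
	recalc_radar(false, 0); /* radar not requested: nothing */
	return 0;
}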
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.mode = chan_to_phymode(chandef);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->bss_conf.ssid;
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn("failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath10k_recalc_radar_detection(ar);
+
+ return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn("failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -880,7 +931,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("Failed to bring up vdev %d: %i\n",
+ ath10k_warn("failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
ath10k_vdev_stop(arvif);
return;
@@ -904,7 +955,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (!info->ibss_joined) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
if (ret)
- ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +964,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
arvif->bssid);
if (ret) {
- ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
return;
}
@@ -925,7 +976,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
if (ret) {
- ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
return;
}
@@ -934,7 +985,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
- ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
arvif->vdev_id, ret);
}
@@ -961,7 +1012,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
- ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+ ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
@@ -974,8 +1025,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
if (ret) {
- ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
+ ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
return ret;
}
@@ -1429,7 +1480,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
- ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+ ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
@@ -1442,7 +1493,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+ ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
@@ -1452,7 +1503,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+ ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
@@ -1473,7 +1524,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
- ath10k_warn("VDEV: %d up failed: ret %d\n",
+ ath10k_warn("failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
return;
}
@@ -1524,7 +1575,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool reassoc)
{
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
@@ -1533,34 +1584,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+ ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
+ peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+ ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
if (ret) {
- ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
+ ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
return ret;
}
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+ ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
- ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+ ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
@@ -1575,9 +1638,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+ ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -1685,19 +1758,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return ret;
}
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ return WMI_UNINIT_DFS_DOMAIN;
+ case NL80211_DFS_FCC:
+ return WMI_FCC_DFS_DOMAIN;
+ case NL80211_DFS_ETSI:
+ return WMI_ETSI_DFS_DOMAIN;
+ case NL80211_DFS_JP:
+ return WMI_MKK4_DFS_DOMAIN;
+ }
+ return WMI_UNINIT_DFS_DOMAIN;
+}
+
static void ath10k_regd_update(struct ath10k *ar)
{
struct reg_dmn_pair_mapping *regpair;
int ret;
+ enum wmi_dfs_region wmi_dfs_reg;
+ enum nl80211_dfs_regions nl_dfs_reg;
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_update_channel_list(ar);
if (ret)
- ath10k_warn("could not update channel list (%d)\n", ret);
+ ath10k_warn("failed to update channel list: %d\n", ret);
regpair = ar->ath_common.regulatory.regpair;
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ nl_dfs_reg = ar->dfs_detector->region;
+ wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+ } else {
+ wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+ }
+
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1803,10 @@ static void ath10k_regd_update(struct ath10k *ar)
regpair->reg_domain, /* 2ghz */
regpair->reg_domain, /* 5ghz */
regpair->reg_2ghz_ctl,
- regpair->reg_5ghz_ctl);
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
if (ret)
- ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+ ath10k_warn("failed to set pdev regdomain: %d\n", ret);
}
static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1824,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
request->dfs_region);
if (!result)
- ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+ ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
request->dfs_region);
}
@@ -1759,10 +1858,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
if (info->control.vif)
return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
- if (ar->monitor_enabled)
+ if (ar->monitor_started)
return ar->monitor_vdev_id;
- ath10k_warn("could not resolve vdev id\n");
+ ath10k_warn("failed to resolve vdev id\n");
return 0;
}
@@ -1792,8 +1891,13 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
wep_key_work);
int ret, keyidx = arvif->def_wep_key_newidx;
+ mutex_lock(&arvif->ar->conf_mutex);
+
+ if (arvif->ar->state != ATH10K_STATE_ON)
+ goto unlock;
+
if (arvif->def_wep_key_idx == keyidx)
- return;
+ goto unlock;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
arvif->vdev_id, keyidx);
@@ -1803,11 +1907,16 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
arvif->ar->wmi.vdev_param->def_keyid,
keyidx);
if (ret) {
- ath10k_warn("could not update wep keyidx (%d)\n", ret);
- return;
+ ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
+ goto unlock;
}
arvif->def_wep_key_idx = keyidx;
+
+unlock:
+ mutex_unlock(&arvif->ar->conf_mutex);
}
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1879,7 +1988,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
ar->fw_features)) {
if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn("wmi mgmt_tx queue limit reached\n");
+ ath10k_warn("reached WMI management tranmist queue limit\n");
ret = -EBUSY;
goto exit;
}
@@ -1903,7 +2012,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
exit:
if (ret) {
- ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+ ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
@@ -1964,7 +2073,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (!peer) {
ret = ath10k_peer_create(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+ ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
@@ -1984,7 +2093,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (!peer) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+ ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
@@ -2018,7 +2127,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ret = ath10k_wmi_mgmt_tx(ar, skb);
if (ret) {
- ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ ath10k_warn("failed to transmit management frame via WMI: %d\n",
+ ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
@@ -2043,7 +2153,7 @@ void ath10k_reset_scan(unsigned long ptr)
return;
}
- ath10k_warn("scan timeout. resetting. fw issue?\n");
+ ath10k_warn("scan timed out, firmware problem?\n");
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2189,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
ret = ath10k_wmi_stop_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+ ath10k_warn("failed to stop wmi scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2209,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
if (ar->scan.in_progress) {
- ath10k_warn("could not stop scan. its still in progress\n");
+ ath10k_warn("failed to stop scan, it's still in progress\n");
ar->scan.in_progress = false;
ath10k_offchan_tx_purge(ar);
ret = -ETIMEDOUT;
@@ -2187,72 +2297,171 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_htt(ar, skb);
}
-/*
- * Initialize various parameters with default vaules.
- */
+/* Must not be called with conf_mutex held as the workers can also take it. */
+static void ath10k_drain_tx(struct ath10k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
+ cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
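
The comment above ath10k_drain_tx() matters because cancel_work_sync() blocks until the work handler returns; if the caller held conf_mutex and the handler also takes it (as ath10k_tx_wep_key_work() now does), the two would wait on each other forever. Schematically (a hedged illustration, not driver code):

/*
 * thread A                            worker B
 * mutex_lock(&ar->conf_mutex);
 * cancel_work_sync(&work); --------> mutex_lock(&ar->conf_mutex);
 *   waits for B to finish              waits for A to unlock
 */

Hence ath10k_start()/ath10k_stop() below call ath10k_drain_tx() before taking conf_mutex.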
+
void ath10k_halt(struct ath10k *ar)
{
+ struct ath10k_vif *arvif;
+
lockdep_assert_held(&ar->conf_mutex);
- ath10k_stop_cac(ar);
+ if (ath10k_monitor_is_enabled(ar)) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ar->promisc = false;
+ ar->monitor = false;
+ ath10k_monitor_stop(ar);
+ }
+
del_timer_sync(&ar->scan.timeout);
- ath10k_offchan_tx_purge(ar);
- ath10k_mgmt_over_wmi_tx_purge(ar);
+ ath10k_reset_scan((unsigned long)ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
- if (ar->scan.in_progress) {
- del_timer(&ar->scan.timeout);
- ar->scan.in_progress = false;
- ieee80211_scan_completed(ar->hw, true);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->beacon)
+ continue;
+
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
}
spin_unlock_bh(&ar->data_lock);
}
+static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->cfg_tx_chainmask) {
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+ } else {
+ *tx_ant = ar->supp_tx_chainmask;
+ *rx_ant = ar->supp_rx_chainmask;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED))
+ return 0;
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+ tx_ant);
+ if (ret) {
+ ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+ rx_ant);
+ if (ret) {
+ ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
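
A brief note on the masks: each set bit in tx_ant/rx_ant is assumed to enable one RF chain, which is consistent with the TARGET_*_CHAIN_MASK defaults wired up later in this patch. A stand-alone sketch of that interpretation:

#include <stdio.h>

/* Assumed semantics: one bit per RF chain in the WMI chainmask. */
static int chains_enabled(unsigned int mask)
{
	return __builtin_popcount(mask);
}

int main(void)
{
	printf("0x7 -> %d chains\n", chains_enabled(0x7)); /* 3x3 */
	printf("0x1 -> %d chain\n", chains_enabled(0x1));  /* single chain */
	return 0;
}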
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
int ret = 0;
+ /*
+ * This makes sense only when restarting hw. It is harmless to call
+ * it unconditionally. This is necessary to make sure no HTT/WMI tx
+ * commands will be submitted while restarting.
+ */
+ ath10k_drain_tx(ar);
+
mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH10K_STATE_OFF &&
- ar->state != ATH10K_STATE_RESTARTING) {
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ar->state = ATH10K_STATE_ON;
+ break;
+ case ATH10K_STATE_RESTARTING:
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_RESTARTED;
+ break;
+ case ATH10K_STATE_ON:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_WEDGED:
+ WARN_ON(1);
ret = -EINVAL;
- goto exit;
+ goto err;
}
ret = ath10k_hif_power_up(ar);
if (ret) {
- ath10k_err("could not init hif (%d)\n", ret);
- ar->state = ATH10K_STATE_OFF;
- goto exit;
+ ath10k_err("Could not init hif: %d\n", ret);
+ goto err_off;
}
ret = ath10k_core_start(ar);
if (ret) {
- ath10k_err("could not init core (%d)\n", ret);
- ath10k_hif_power_down(ar);
- ar->state = ATH10K_STATE_OFF;
- goto exit;
+ ath10k_err("Could not init core: %d\n", ret);
+ goto err_power_down;
}
- if (ar->state == ATH10K_STATE_OFF)
- ar->state = ATH10K_STATE_ON;
- else if (ar->state == ATH10K_STATE_RESTARTING)
- ar->state = ATH10K_STATE_RESTARTED;
-
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
- if (ret)
- ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
- ret);
+ if (ret) {
+ ath10k_warn("failed to enable PMF QOS: %d\n", ret);
+ goto err_core_stop;
+ }
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
- if (ret)
- ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
- ret);
+ if (ret) {
+ ath10k_warn("failed to enable dynamic BW: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ if (ar->cfg_tx_chainmask)
+ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
+ ar->cfg_rx_chainmask);
/*
* By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2266,15 +2475,27 @@ static int ath10k_start(struct ieee80211_hw *hw)
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->arp_ac_override, 0);
if (ret) {
- ath10k_warn("could not set arp ac override parameter: %d\n",
+ ath10k_warn("failed to set arp ac override parameter: %d\n",
ret);
- goto exit;
+ goto err_core_stop;
}
+ ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
- ret = 0;
-exit:
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_core_stop:
+ ath10k_core_stop(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_off:
+ ar->state = ATH10K_STATE_OFF;
+
+err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
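
The reworked ath10k_start() above swaps the single "exit" label for the usual kernel layered-goto unwind: each error label undoes exactly the steps that had already succeeded, in reverse order. A minimal sketch of the idiom with stand-in step functions:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }	/* pretend this step fails */
static void undo_a(void) { puts("undo a"); }

static int start(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err;

	ret = step_b();
	if (ret)
		goto err_undo_a;	/* b failed: unwind a only */

	return 0;

err_undo_a:
	undo_a();
err:
	return ret;
}

int main(void)
{
	return start() ? 1 : 0;
}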
@@ -2283,19 +2504,15 @@ static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ ath10k_drain_tx(ar);
+
mutex_lock(&ar->conf_mutex);
- if (ar->state == ATH10K_STATE_ON ||
- ar->state == ATH10K_STATE_RESTARTED ||
- ar->state == ATH10K_STATE_WEDGED)
+ if (ar->state != ATH10K_STATE_OFF) {
ath10k_halt(ar);
-
- ar->state = ATH10K_STATE_OFF;
+ ar->state = ATH10K_STATE_OFF;
+ }
mutex_unlock(&ar->conf_mutex);
- ath10k_mgmt_over_wmi_tx_purge(ar);
-
- cancel_work_sync(&ar->offchan_tx_work);
- cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
@@ -2309,7 +2526,7 @@ static int ath10k_config_ps(struct ath10k *ar)
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_mac_vif_setup_ps(arvif);
if (ret) {
- ath10k_warn("could not setup powersave (%d)\n", ret);
+ ath10k_warn("failed to setup powersave: %d\n", ret);
break;
}
}
@@ -2343,7 +2560,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
static void ath10k_config_chan(struct ath10k *ar)
{
struct ath10k_vif *arvif;
- bool monitor_was_enabled;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2573,8 @@ static void ath10k_config_chan(struct ath10k *ar)
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface. */
- monitor_was_enabled = ar->monitor_enabled;
-
- if (ar->monitor_enabled)
- ath10k_monitor_stop(ar);
+ if (ar->monitor_started)
+ ath10k_monitor_vdev_stop(ar);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->is_started)
@@ -2371,7 +2585,7 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_vdev_stop(arvif);
if (ret) {
- ath10k_warn("could not stop vdev %d (%d)\n",
+ ath10k_warn("failed to stop vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@@ -2388,7 +2602,7 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_vdev_start(arvif);
if (ret) {
- ath10k_warn("could not start vdev %d (%d)\n",
+ ath10k_warn("failed to start vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@@ -2399,14 +2613,14 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("could not bring vdev up %d (%d)\n",
+ ath10k_warn("failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
- if (monitor_was_enabled)
- ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ if (ath10k_monitor_is_enabled(ar))
+ ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2634,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ath10k_dbg(ATH10K_DBG_MAC,
- "mac config channel %d mhz flags 0x%x\n",
+ "mac config channel %dMHz flags 0x%x radar %d\n",
conf->chandef.chan->center_freq,
- conf->chandef.chan->flags);
+ conf->chandef.chan->flags,
+ conf->radar_enabled);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
spin_unlock_bh(&ar->data_lock);
- ath10k_config_radar_detection(ar);
+ ar->radar_enabled = conf->radar_enabled;
+ ath10k_recalc_radar_detection(ar);
if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
ar->chandef = conf->chandef;
@@ -2444,14 +2660,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+ ath10k_warn("failed to set 2g txpower %d: %d\n",
hw->conf.power_level, ret);
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+ ath10k_warn("failed to set 5g txpower %d: %d\n",
hw->conf.power_level, ret);
}
@@ -2459,10 +2675,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ath10k_config_ps(ar);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR)
- ret = ath10k_monitor_create(ar);
- else
- ret = ath10k_monitor_destroy(ar);
+ if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
+ ar->monitor = true;
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (config): %d\n",
+ ret);
+ ar->monitor = false;
+ }
+ } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
+ ar->monitor) {
+ ar->monitor = false;
+ ath10k_monitor_stop(ar);
+ }
}
mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2722,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
INIT_LIST_HEAD(&arvif->list);
- if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
- ath10k_warn("Only one monitor interface allowed\n");
- ret = -EBUSY;
- goto err;
- }
-
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
@@ -2545,7 +2764,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
- ath10k_warn("WMI vdev %i create failed: ret %d\n",
+ ath10k_warn("failed to create WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
@@ -2557,7 +2776,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
- ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+ ath10k_warn("failed to set vdev %i default key id: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2567,7 +2786,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+ ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2575,14 +2794,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
- ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn("failed to create vdev %i peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
- ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+ ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2594,7 +2813,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+ ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2604,7 +2823,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+ ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2614,7 +2833,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+ ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2622,21 +2841,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
- ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
if (ret) {
- ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ar->monitor_present = true;
-
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2668,6 +2884,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
@@ -2679,7 +2898,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
- ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+ ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2909,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
- ath10k_warn("WMI vdev %i delete failed: %d\n",
+ ath10k_warn("failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ar->monitor_present = false;
-
ath10k_peer_cleanup(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2944,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
- /* Monitor must not be started if it wasn't created first.
- * Promiscuous mode may be started on a non-monitor interface - in
- * such case the monitor vdev is not created so starting the
- * monitor makes no sense. Since ath10k uses no special RX filters
- * (only BSS filter in STA mode) there's no need for any special
- * action here. */
- if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- !ar->monitor_enabled && ar->monitor_present) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
- ar->monitor_vdev_id);
-
- ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
- if (ret)
- ath10k_warn("Unable to start monitor mode\n");
- } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- ar->monitor_enabled && ar->monitor_present) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
- ar->monitor_vdev_id);
-
- ret = ath10k_monitor_stop(ar);
- if (ret)
- ath10k_warn("Unable to stop monitor mode\n");
+ if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
+ ar->promisc = true;
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (promisc): %d\n",
+ ret);
+ ar->promisc = false;
+ }
+ } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
+ ar->promisc = false;
+ ath10k_monitor_stop(ar);
}
mutex_unlock(&ar->conf_mutex);
@@ -2780,7 +2985,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, arvif->beacon_interval);
if (ret)
- ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+ ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2793,7 +2998,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
- ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+ ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2808,7 +3013,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
- ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+ ath10k_warn("failed to set dtim period for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2820,7 +3025,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
- if (changed & BSS_CHANGED_BSSID) {
+ /*
+ * Firmware manages AP self-peer internally so make sure to not create
+ * it in driver. Otherwise AP self-peer deletion may timeout later.
+ */
+ if (changed & BSS_CHANGED_BSSID &&
+ vif->type != NL80211_IFTYPE_AP) {
if (!is_zero_ether_addr(info->bssid)) {
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
@@ -2829,7 +3039,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
+ ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +3078,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_control_beaconing(arvif, info);
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- u32 cts_prot;
- if (info->use_cts_prot)
- cts_prot = 1;
- else
- cts_prot = 0;
-
+ arvif->use_cts_prot = info->use_cts_prot;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
- arvif->vdev_id, cts_prot);
+ arvif->vdev_id, info->use_cts_prot);
- vdev_param = ar->wmi.vdev_param->enable_rtscts;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- cts_prot);
+ ret = ath10k_recalc_rtscts_prot(arvif);
if (ret)
- ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
}
@@ -2900,7 +3103,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
- ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+ ath10k_warn("failed to set erp slot for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2919,7 +3122,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
- ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+ ath10k_warn("failed to set preamble for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2990,7 +3193,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not start hw scan (%d)\n", ret);
+ ath10k_warn("failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3213,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ret = ath10k_abort_scan(ar);
if (ret) {
- ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
- ret);
+ ath10k_warn("failed to abort scan: %d\n", ret);
ieee80211_scan_completed(hw, 1 /* aborted */);
}
mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3291,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!peer) {
if (cmd == SET_KEY) {
- ath10k_warn("cannot install key for non-existent peer %pM\n",
+ ath10k_warn("failed to install key for non-existent peer %pM\n",
peer_addr);
ret = -EOPNOTSUPP;
goto exit;
@@ -3112,7 +3314,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
- ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+ ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
@@ -3127,7 +3329,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
peer->keys[key->keyidx] = NULL;
else if (peer == NULL)
/* impossible unless FW goes crazy */
- ath10k_warn("peer %pM disappeared!\n", peer_addr);
+ ath10k_warn("Peer %pM disappeared!\n", peer_addr);
spin_unlock_bh(&ar->data_lock);
exit:
@@ -3195,6 +3397,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ sta->addr);
+
+ err = ath10k_station_assoc(ar, arvif, sta, true);
+ if (err)
+ ath10k_warn("failed to reassociate station: %pM\n",
+ sta->addr);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3236,7 +3448,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
max_num_peers = TARGET_NUM_PEERS;
if (ar->num_peers >= max_num_peers) {
- ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+ ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
ar->num_peers, max_num_peers);
ret = -ENOBUFS;
goto exit;
@@ -3248,7 +3460,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3472,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+ ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3487,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
- ret = ath10k_station_assoc(ar, arvif, sta);
+ ret = ath10k_station_assoc(ar, arvif, sta, false);
if (ret)
- ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+ ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3503,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+ ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
}
exit:
@@ -3339,7 +3551,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
WMI_STA_PS_PARAM_UAPSD,
arvif->u.sta.uapsd);
if (ret) {
- ath10k_warn("could not set uapsd params %d\n", ret);
+ ath10k_warn("failed to set uapsd params: %d\n", ret);
goto exit;
}
@@ -3352,7 +3564,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
WMI_STA_PS_PARAM_RX_WAKE_POLICY,
value);
if (ret)
- ath10k_warn("could not set rx wake param %d\n", ret);
+ ath10k_warn("failed to set rx wake param: %d\n", ret);
exit:
return ret;
@@ -3402,13 +3614,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
/* FIXME: FW accepts wmm params per hw, not per vif */
ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
if (ret) {
- ath10k_warn("could not set wmm params %d\n", ret);
+ ath10k_warn("failed to set wmm params: %d\n", ret);
goto exit;
}
ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
- ath10k_warn("could not set sta uapsd %d\n", ret);
+ ath10k_warn("failed to set sta uapsd: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3673,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not start roc scan (%d)\n", ret);
+ ath10k_warn("failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3682,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
if (ret == 0) {
- ath10k_warn("could not switch to channel for roc scan\n");
+ ath10k_warn("failed to switch to channel for roc scan\n");
ath10k_abort_scan(ar);
ret = -ETIMEDOUT;
goto exit;
@@ -3511,7 +3723,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
@@ -3534,7 +3746,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
@@ -3544,7 +3756,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
bool skip;
@@ -3573,7 +3786,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
- ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+ ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
skip, ar->state, ret);
skip:
@@ -3608,7 +3821,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
ret = ath10k_hif_suspend(ar);
if (ret) {
- ath10k_warn("could not suspend hif (%d)\n", ret);
+ ath10k_warn("failed to suspend hif: %d\n", ret);
goto resume;
}
@@ -3617,7 +3830,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
- ath10k_warn("could not resume target (%d)\n", ret);
+ ath10k_warn("failed to resume target: %d\n", ret);
ret = 1;
exit:
@@ -3634,14 +3847,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
ret = ath10k_hif_resume(ar);
if (ret) {
- ath10k_warn("could not resume hif (%d)\n", ret);
+ ath10k_warn("failed to resume hif: %d\n", ret);
ret = 1;
goto exit;
}
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
- ath10k_warn("could not resume target (%d)\n", ret);
+ ath10k_warn("failed to resume target: %d\n", ret);
ret = 1;
goto exit;
}
@@ -3964,7 +4177,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
if (ret) {
- ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+ ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
fixed_rate, ret);
ret = -EINVAL;
goto exit;
@@ -3977,7 +4190,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
vdev_param, fixed_nss);
if (ret) {
- ath10k_warn("Could not set fixed_nss param %d: %d\n",
+ ath10k_warn("failed to set fixed nss param %d: %d\n",
fixed_nss, ret);
ret = -EINVAL;
goto exit;
@@ -3990,7 +4203,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
force_sgi);
if (ret) {
- ath10k_warn("Could not set sgi param %d: %d\n",
+ ath10k_warn("failed to set sgi param %d: %d\n",
force_sgi, ret);
ret = -EINVAL;
goto exit;
@@ -4026,7 +4239,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
}
if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn("Could not force SGI usage for default rate settings\n");
+ ath10k_warn("failed to force SGI usage for default rate settings\n");
return -EINVAL;
}
@@ -4034,14 +4247,6 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
fixed_nss, force_sgi);
}
-static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_chan_def *chandef)
-{
- /* there's no need to do anything here. vif->csa_active is enough */
- return;
-}
-
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4072,8 +4277,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
- ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
- sta->addr, sta->bandwidth);
+ ath10k_warn("Invalid bandwith %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -4099,8 +4304,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
case IEEE80211_SMPS_NUM_MODES:
- ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
- sta->addr, sta->smps_mode);
+ ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4108,15 +4313,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
arsta->smps = smps;
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- /* FIXME: Not implemented. Probably the only way to do it would
- * be to re-assoc the peer. */
- changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
- ath10k_dbg(ATH10K_DBG_MAC,
- "mac sta rc update for %pM: changing supported rates not implemented\n",
- sta->addr);
- }
-
arsta->changed |= changed;
spin_unlock_bh(&ar->data_lock);
@@ -4154,10 +4350,11 @@ static const struct ieee80211_ops ath10k_ops = {
.set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
+ .set_antenna = ath10k_set_antenna,
+ .get_antenna = ath10k_get_antenna,
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
- .channel_switch_beacon = ath10k_channel_switch_beacon,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
#ifdef CONFIG_PM
@@ -4503,6 +4700,18 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ /* TODO: Have to deal with 2x2 chips if/when they come out. */
+ ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
+ ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
+ } else {
+ ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
+ ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
+ }
+
+ ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
+ ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
+
if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
ar->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4516,7 +4725,6 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT;
@@ -4570,19 +4778,19 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_DFS_UNSET);
if (!ar->dfs_detector)
- ath10k_warn("dfs pattern detector init failed\n");
+ ath10k_warn("failed to initialise DFS pattern detector\n");
}
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
- ath10k_err("Regulatory initialization failed: %i\n", ret);
+ ath10k_err("failed to initialise regulatory: %i\n", ret);
goto err_free;
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
- ath10k_err("ieee80211 registration failed: %d\n", ret);
+ ath10k_err("failed to register ieee80211: %d\n", ret);
goto err_free;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9d242d801d9d..d0004d59c97e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -39,15 +39,28 @@ enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_MSI = 2,
};
-static unsigned int ath10k_target_ps;
+enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_AUTO = 0,
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -346,9 +359,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
- data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
- orig_nbytes,
- &ce_data_base);
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
@@ -442,12 +456,12 @@ done:
__le32_to_cpu(((__le32 *)data_buf)[i]);
}
} else
- ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
- __func__, address);
+ ath10k_warn("failed to read diag value at 0x%x: %d\n",
+ address, ret);
if (data_buf)
- pci_free_consistent(ar_pci->pdev, orig_nbytes,
- data_buf, ce_data_base);
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
return ret;
}
@@ -490,9 +504,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
- data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
- orig_nbytes,
- &ce_data_base);
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
goto done;
@@ -588,13 +603,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
done:
if (data_buf) {
- pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
- ce_data_base);
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
}
if (ret != 0)
- ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
- address);
+ ath10k_warn("failed to write diag value at 0x%x: %d\n",
+ address, ret);
return ret;
}
@@ -747,17 +762,21 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index = src_ring->sw_index;
- unsigned int write_index = src_ring->write_index;
- int err, i;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int err, i = 0;
spin_lock_bh(&ar_pci->ce_lock);
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) < n_items)) {
err = -ENOBUFS;
- goto unlock;
+ goto err;
}
for (i = 0; i < n_items - 1; i++) {
@@ -774,7 +793,7 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
items[i].transfer_id,
CE_SEND_FLAG_GATHER);
if (err)
- goto unlock;
+ goto err;
}
/* `i` is equal to `n_items -1` after for() */
@@ -792,10 +811,15 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
items[i].transfer_id,
0);
if (err)
- goto unlock;
+ goto err;
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
- err = 0;
-unlock:
spin_unlock_bh(&ar_pci->ce_lock);
return err;
}
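
Note what the hunk above fixes in ath10k_pci_hif_tx_sg(): the ring indices are now read under ce_lock, and `i` starts at 0, so on failure the revert loop unwinds exactly the `i` gather entries already queued, and nothing when the initial ring-space check fails. The same pattern in a stand-alone sketch with stand-in functions:

#include <stdio.h>

static int queue_item(int idx)
{
	return idx < 2 ? 0 : -1;	/* pretend the third item fails */
}

static void revert_item(void)
{
	puts("reverted one queued entry");
}

int main(void)
{
	int i = 0, n = 4, err = 0;

	for (i = 0; i < n; i++) {
		err = queue_item(i);
		if (err)
			goto err;
	}
	return 0;

err:
	for (; i > 0; i--)	/* exactly i items were queued before the failure */
		revert_item();
	return err;
}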
@@ -803,6 +827,9 @@ unlock:
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
@@ -854,6 +881,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+
if (!force) {
int resources;
/*
@@ -880,7 +909,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +967,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
{
int ret = 0;
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+
/* polling for received messages not supported */
*dl_is_polled = 0;
@@ -997,6 +1028,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
{
int ul_is_polled, dl_is_polled;
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
ul_pipe,
@@ -1098,6 +1131,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret, ret_early;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
@@ -1233,18 +1268,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info;
- int pipe_num;
+ int i;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- if (pipe_info->ce_hdl) {
- ath10k_ce_deinit(pipe_info->ce_hdl);
- pipe_info->ce_hdl = NULL;
- pipe_info->buf_sz = 0;
- }
- }
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_deinit_pipe(ar, i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1279,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
+
+ if (WARN_ON(!ar_pci->started))
+ return;
ret = ath10k_ce_disable_interrupts(ar);
if (ret)
@@ -1697,30 +1727,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
- int pipe_num;
+ int pipe_num, ret;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
+ pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num];
- pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
- if (pipe_info->ce_hdl == NULL) {
- ath10k_err("failed to initialize CE for pipe: %d\n",
- pipe_num);
-
- /* It is safe to call it here. It checks if ce_hdl is
- * valid for each pipe */
- ath10k_pci_ce_deinit(ar);
- return -1;
+ ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+ if (ret) {
+ ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+ pipe_num, ret);
+ return ret;
}
if (pipe_num == CE_COUNT - 1) {
@@ -1741,16 +1790,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- u32 fw_indicator_address, fw_indicator;
+ u32 fw_indicator;
ath10k_pci_wake(ar);
- fw_indicator_address = ar_pci->fw_indicator_address;
- fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+ fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_indicator & FW_IND_EVENT_PENDING) {
/* ACK: clear Target-side pending event */
- ath10k_pci_write32(ar, fw_indicator_address,
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_indicator & ~FW_IND_EVENT_PENDING);
if (ar_pci->started) {
@@ -1767,13 +1815,32 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
+/* this function effectively clears the target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+}
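
The new ath10k_pci_warm_reset_si0() uses a common MMIO idiom: write the reset bit, read the register back so the posted PCI write is flushed before the delay starts, wait, then clear the bit the same way. A hedged sketch with stand-in accessors (not the real ath10k register helpers):

/* Stand-in MMIO accessors for illustration only. */
static unsigned int regs[1];
static unsigned int mmio_read(int off) { return regs[off]; }
static void mmio_write(int off, unsigned int v) { regs[off] = v; }

#define RESET_CTRL 0
#define SI0_RST    0x1

static void toggle_si0_reset(void)
{
	unsigned int val;

	val = mmio_read(RESET_CTRL);
	mmio_write(RESET_CTRL, val | SI0_RST);	/* assert the line */
	(void)mmio_read(RESET_CTRL);		/* flush posted write */
	/* the real driver sleeps 10ms here */

	val = mmio_read(RESET_CTRL);
	mmio_write(RESET_CTRL, val & ~SI0_RST);	/* deassert */
	(void)mmio_read(RESET_CTRL);		/* flush posted write */
}

int main(void)
{
	toggle_si0_reset();
	return 0;
}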
+
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
ret = ath10k_do_pci_wake(ar);
if (ret) {
@@ -1801,7 +1868,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
msleep(100);
/* clear fw indicator */
- ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
/* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@@ -1826,6 +1893,8 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
SOC_RESET_CONTROL_ADDRESS);
msleep(10);
+ ath10k_pci_warm_reset_si0(ar);
+
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
@@ -1934,7 +2003,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
irq_mode = "legacy";
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("pci irq %s\n", irq_mode);
+ ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
+ irq_mode, ath10k_pci_irq_mode,
+ ath10k_pci_reset_mode);
return 0;
@@ -1952,23 +2023,52 @@ err:
return ret;
}
+static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
+{
+ int i, ret;
+
+ /*
+ * Sometimes warm reset succeeds only after retries.
+ *
+ * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
+ * on the first try.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = __ath10k_pci_hif_power_up(ar, false);
+ if (ret == 0)
+ break;
+
+ ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
+ }
+
+ return ret;
+}
+
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
* preferred (and safer) way to perform a device reset is through a
* warm reset.
*
- * Warm reset doesn't always work though (notably after a firmware
- * crash) so fall back to cold reset if necessary.
+ * Warm reset doesn't always work though, so falling back to cold reset
+ * may be necessary.
*/
- ret = __ath10k_pci_hif_power_up(ar, false);
+ ret = ath10k_pci_hif_power_up_warm(ar);
if (ret) {
- ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+ ath10k_warn("failed to power up target using warm reset: %d\n",
ret);
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
+ return ret;
+
+ ath10k_warn("trying cold reset\n");
+
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
ath10k_err("failed to power up target using cold reset too (%d)\n",
@@ -1984,12 +2084,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
+ ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
- ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
@@ -2137,7 +2239,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_ind;
int ret;
@@ -2148,14 +2249,11 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
return;
}
- fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+ fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_ind & FW_IND_EVENT_PENDING) {
- ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_ind & ~FW_IND_EVENT_PENDING);
-
- /* Some structures are unavailable during early boot or at
- * driver teardown so just print that the device has crashed. */
- ath10k_warn("device crashed - no diagnostics available\n");
+ ath10k_pci_hif_dump_area(ar);
}
ath10k_pci_sleep(ar);
@@ -2385,33 +2483,69 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int wait_limit = 300; /* 3 sec */
+ unsigned long timeout;
int ret;
+ u32 val;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
ret = ath10k_pci_wake(ar);
if (ret) {
- ath10k_err("failed to wake up target: %d\n", ret);
+ ath10k_err("failed to wake up target for init: %d\n", ret);
return ret;
}
- while (wait_limit-- &&
- !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
- FW_IND_INITIALIZED)) {
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+ do {
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+
+ /* target should never return this */
+ if (val == 0xffffffff)
+ continue;
+
+ /* the device has crashed so don't bother trying anymore */
+ if (val & FW_IND_EVENT_PENDING)
+ break;
+
+ if (val & FW_IND_INITIALIZED)
+ break;
+
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
+ ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL);
+
mdelay(10);
- }
+ } while (time_before(jiffies, timeout));
- if (wait_limit < 0) {
- ath10k_err("target stalled\n");
+ if (val == 0xffffffff) {
+ ath10k_err("failed to read device register, device is gone\n");
ret = -EIO;
goto out;
}
+ if (val & FW_IND_EVENT_PENDING) {
+ ath10k_warn("device has crashed during init\n");
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+ val & ~FW_IND_EVENT_PENDING);
+ ath10k_pci_hif_dump_area(ar);
+ ret = -ECOMM;
+ goto out;
+ }
+
+ if (!(val & FW_IND_INITIALIZED)) {
+ ath10k_err("failed to receive initialized event from target: %08x\n",
+ val);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+
out:
ath10k_pci_sleep(ar);
return ret;
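
The rewritten wait loop replaces a decrementing counter with a jiffies deadline and then re-inspects the last indicator value after the loop, so a crash, a vanished device, and a plain timeout can all be told apart. A condensed sketch of that idiom, assuming a hypothetical reg_read() accessor and illustrative WAIT_MS/DONE_BIT/ERROR_BIT names:

	unsigned long timeout = jiffies + msecs_to_jiffies(WAIT_MS);
	u32 val;

	do {
		val = reg_read();	/* hypothetical accessor */
		if ((val & DONE_BIT) || (val & ERROR_BIT))
			break;
		mdelay(10);
	} while (time_before(jiffies, timeout));

	/* decide from the last sampled value, not from loop exit alone */
	if (val & ERROR_BIT)
		return -ECOMM;
	if (!(val & DONE_BIT))
		return -ETIMEDOUT;
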
@@ -2422,6 +2556,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
int i, ret;
u32 val;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
+
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2589,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
}
ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
return 0;
}
@@ -2484,7 +2623,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
u32 lcr_val, chip_id;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
if (ar_pci == NULL)
@@ -2503,7 +2642,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- if (ath10k_target_ps)
+ if (ath10k_pci_target_ps)
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
ath10k_pci_dump_features(ar_pci);
@@ -2516,23 +2655,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ar_pci->ar = ar;
- ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
pci_set_drvdata(pdev, ar);
- /*
- * Without any knowledge of the Host, the Target may have been reset or
- * power cycled and its Config Space may no longer reflect the PCI
- * address space that was assigned earlier by the PCI infrastructure.
- * Refresh it now.
- */
- ret = pci_assign_resource(pdev, BAR_NUM);
- if (ret) {
- ath10k_err("failed to assign PCI space: %d\n", ret);
- goto err_ar;
- }
-
ret = pci_enable_device(pdev);
if (ret) {
ath10k_err("failed to enable PCI device: %d\n", ret);
@@ -2594,16 +2720,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_do_pci_sleep(ar);
+ ret = ath10k_pci_alloc_ce(ar);
+ if (ret) {
+ ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+ goto err_iomap;
+ }
+
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("failed to register driver core: %d\n", ret);
- goto err_iomap;
+ goto err_free_ce;
}
return 0;
+err_free_ce:
+ ath10k_pci_free_ce(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@@ -2626,7 +2760,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
@@ -2636,9 +2770,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
if (!ar_pci)
return;
- tasklet_kill(&ar_pci->msi_fw_err);
-
ath10k_core_unregister(ar);
+ ath10k_pci_free_ce(ar);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2813,5 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index b43fdb4f7319..dfdebb4157aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -189,9 +189,6 @@ struct ath10k_pci {
struct ath10k_hif_cb msg_callbacks_current;
- /* Target address used to signal a pending firmware event */
- u32 fw_indicator_address;
-
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 0541dd939ce9..82669a77e553 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -100,189 +100,6 @@ exit:
wake_up(&htt->empty_tx_wq);
}
-static const u8 rx_legacy_rate_idx[] = {
- 3, /* 0x00 - 11Mbps */
- 2, /* 0x01 - 5.5Mbps */
- 1, /* 0x02 - 2Mbps */
- 0, /* 0x03 - 1Mbps */
- 3, /* 0x04 - 11Mbps */
- 2, /* 0x05 - 5.5Mbps */
- 1, /* 0x06 - 2Mbps */
- 0, /* 0x07 - 1Mbps */
- 10, /* 0x08 - 48Mbps */
- 8, /* 0x09 - 24Mbps */
- 6, /* 0x0A - 12Mbps */
- 4, /* 0x0B - 6Mbps */
- 11, /* 0x0C - 54Mbps */
- 9, /* 0x0D - 36Mbps */
- 7, /* 0x0E - 18Mbps */
- 5, /* 0x0F - 9Mbps */
-};
-
-static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
- enum ieee80211_band band,
- struct ieee80211_rx_status *status)
-{
- u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
- u8 info0 = info->rate.info0;
- u32 info1 = info->rate.info1;
- u32 info2 = info->rate.info2;
- u8 preamble = 0;
-
- /* Check if valid fields */
- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
- return;
-
- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
-
- switch (preamble) {
- case HTT_RX_LEGACY:
- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
- rate_idx = 0;
-
- if (rate < 0x08 || rate > 0x0F)
- break;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- if (cck)
- rate &= ~BIT(3);
- rate_idx = rx_legacy_rate_idx[rate];
- break;
- case IEEE80211_BAND_5GHZ:
- rate_idx = rx_legacy_rate_idx[rate];
- /* We are using same rate table registering
- HW - ath10k_rates[]. In case of 5GHz skip
- CCK rates, so -4 here */
- rate_idx -= 4;
- break;
- default:
- break;
- }
-
- status->rate_idx = rate_idx;
- break;
- case HTT_RX_HT:
- case HTT_RX_HT_WITH_TXBF:
- /* HT-SIG - Table 20-11 in info1 and info2 */
- mcs = info1 & 0x1F;
- nss = mcs >> 3;
- bw = (info1 >> 7) & 1;
- sgi = (info2 >> 7) & 1;
-
- status->rate_idx = mcs;
- status->flag |= RX_FLAG_HT;
- if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
- if (bw)
- status->flag |= RX_FLAG_40MHZ;
- break;
- case HTT_RX_VHT:
- case HTT_RX_VHT_WITH_TXBF:
- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
- TODO check this */
- mcs = (info2 >> 4) & 0x0F;
- nss = ((info1 >> 10) & 0x07) + 1;
- bw = info1 & 3;
- sgi = info2 & 1;
-
- status->rate_idx = mcs;
- status->vht_nss = nss;
-
- if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
-
- switch (bw) {
- /* 20MHZ */
- case 0:
- break;
- /* 40MHZ */
- case 1:
- status->flag |= RX_FLAG_40MHZ;
- break;
- /* 80MHZ */
- case 2:
- status->vht_flag |= RX_VHT_FLAG_80MHZ;
- }
-
- status->flag |= RX_FLAG_VHT;
- break;
- default:
- break;
- }
-}
-
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
-{
- struct ieee80211_rx_status *status;
- struct ieee80211_channel *ch;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
-
- status = IEEE80211_SKB_RXCB(info->skb);
- memset(status, 0, sizeof(*status));
-
- if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
- status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(
- __le16_to_cpu(hdr->frame_control) &
- ~IEEE80211_FCTL_PROTECTED);
- }
-
- if (info->mic_err)
- status->flag |= RX_FLAG_MMIC_ERROR;
-
- if (info->fcs_err)
- status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
- if (info->amsdu_more)
- status->flag |= RX_FLAG_AMSDU_MORE;
-
- status->signal = info->signal;
-
- spin_lock_bh(&ar->data_lock);
- ch = ar->scan_channel;
- if (!ch)
- ch = ar->rx_channel;
- spin_unlock_bh(&ar->data_lock);
-
- if (!ch) {
- ath10k_warn("no channel configured; ignoring frame!\n");
- dev_kfree_skb_any(info->skb);
- return;
- }
-
- process_rx_rates(ar, info, ch->band, status);
- status->band = ch->band;
- status->freq = ch->center_freq;
-
- if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
- /* TSF available only in 32-bit */
- status->mactime = info->tsf & 0xffffffff;
- status->flag |= RX_FLAG_MACTIME_END;
- }
-
- ath10k_dbg(ATH10K_DBG_DATA,
- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
- info->skb,
- info->skb->len,
- status->flag == 0 ? "legacy" : "",
- status->flag & RX_FLAG_HT ? "ht" : "",
- status->flag & RX_FLAG_VHT ? "vht" : "",
- status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
- status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
- status->rate_idx,
- status->vht_nss,
- status->freq,
- status->band, status->flag, info->fcs_err);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
- info->skb->data, info->skb->len);
-
- ieee80211_rx(ar->hw, info->skb);
-}
-
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index 356dc9c04c9e..aee3e20058f8 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -21,7 +21,6 @@
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb1f7b5bcf4c..4b7782a529ac 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -639,6 +639,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *wmi_skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int len;
+ u32 buf_len = skb->len;
u16 fc;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -648,6 +649,15 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
return -EINVAL;
len = sizeof(cmd->hdr) + skb->len;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
len = round_up(len, 4);
wmi_skb = ath10k_wmi_alloc_skb(len);
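
The extra length is reserved because the firmware appends a CCMP MIC to protected robust management frames (action/deauth/disassoc); IEEE80211_CCMP_MIC_LEN is 8 bytes in mac80211. A worked example, assuming a hypothetical 26-byte protected deauth frame (24-byte header plus 2-byte reason code):

	buf_len = 26 + IEEE80211_CCMP_MIC_LEN;		/* = 34, length the target sees */
	len = round_up(sizeof(cmd->hdr) + 34, 4);	/* WMI buffer, 4-byte aligned */
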
@@ -659,7 +669,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
cmd->hdr.tx_rate = 0;
cmd->hdr.tx_power = 0;
- cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(cmd->buf, skb->data, skb->len);
@@ -957,10 +967,16 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
* frames with Protected Bit set. */
if (ieee80211_has_protected(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control)) {
- status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(fc &
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_action(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
~IEEE80211_FCTL_PROTECTED);
+ }
}
ath10k_dbg(ATH10K_DBG_MGMT,
@@ -1362,13 +1378,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *bcn;
int ret, vdev_id = 0;
- ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
-
ev = (struct wmi_host_swba_event *)skb->data;
map = __le32_to_cpu(ev->vdev_map);
- ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
- "-vdev map 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
ev->vdev_map);
for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1398,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
bcn_info = &ev->bcn_info[i];
ath10k_dbg(ATH10K_DBG_MGMT,
- "-bcn_info[%d]:\n"
- "--tim_len %d\n"
- "--tim_mcast %d\n"
- "--tim_changed %d\n"
- "--tim_num_ps_pending %d\n"
- "--tim_bitmap 0x%08x%08x%08x%08x\n",
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
i,
__le32_to_cpu(bcn_info->tim_info.tim_len),
__le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -1439,6 +1447,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
}
ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
@@ -1448,6 +1457,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ATH10K_SKB_CB(bcn)->paddr);
if (ret) {
ath10k_warn("failed to map beacon: %d\n", ret);
+ dev_kfree_skb_any(bcn);
goto skip;
}
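
Two leaks are plugged in this hunk: the previous beacon pointer is cleared after being freed so it cannot be unmapped or freed twice on the next pass, and the new beacon is freed when DMA mapping fails. The fixed shape, reduced to a hedged sketch of the essentials:

	paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, paddr)) {
		dev_kfree_skb_any(skb);	/* mapping failed: don't leak the skb */
		goto skip;
	}
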
@@ -2365,7 +2375,7 @@ void ath10k_wmi_detach(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
-int ath10k_wmi_connect_htc_service(struct ath10k *ar)
+int ath10k_wmi_connect(struct ath10k *ar)
{
int status;
struct ath10k_htc_svc_conn_req conn_req;
@@ -2393,8 +2403,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
return 0;
}
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g)
+static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+ u16 rd2g, u16 rd5g, u16 ctl2g,
+ u16 ctl5g)
{
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
@@ -2418,6 +2429,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
+static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+ u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+ ctl2g, ctl5g, dfs_reg);
+ else
+ return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+ ctl2g, ctl5g);
+}
+
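
With the dispatch wrapper in place, callers stay firmware-agnostic and always pass the DFS region; the main-firmware path simply drops it. A hypothetical call site (the regdomain/CTL values below are illustrative placeholders, not real regulatory codes):

	ret = ath10k_wmi_pdev_set_regdomain(ar, 0x3a, 0x3a, 0x3a,
					    0x10, 0x10,
					    WMI_FCC_DFS_DOMAIN);
	if (ret)
		ath10k_warn("failed to set regdomain: %d\n", ret);
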
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *arg)
{
@@ -3456,8 +3507,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi peer assoc vdev %d addr %pM\n",
- arg->vdev_id, arg->addr);
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f51d5ca0141f..e93df2c10413 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
} __packed;
} __packed;
-/* macro to convert MAC address from WMI word format to char array */
-#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
- (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
- (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
- (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
- (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
- (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
- (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
- } while (0)
-
struct wmi_cmd_map {
u32 init_cmdid;
u32 start_scan_cmdid;
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
__le32 conformance_test_limit_5G;
} __packed;
+enum wmi_dfs_region {
+ /* Uninitialized dfs domain */
+ WMI_UNINIT_DFS_DOMAIN = 0,
+
+ /* FCC3 dfs domain */
+ WMI_FCC_DFS_DOMAIN = 1,
+
+ /* ETSI dfs domain */
+ WMI_ETSI_DFS_DOMAIN = 2,
+
+ /* Japan dfs domain */
+ WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+
+ /* dfs domain from wmi_dfs_region */
+ __le32 dfs_domain;
+} __packed;
+
/* Command to set/unset chip in quiet mode */
struct wmi_pdev_set_quiet_cmd {
/* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
};
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED 1
+#define WMI_RTSCTS_SET_MASK 0x0f
+#define WMI_RTSCTS_SET_LSB 0
+
+#define WMI_RTSCTS_PROFILE_MASK 0xf0
+#define WMI_RTSCTS_PROFILE_LSB 4
+
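
The paired _MASK/_LSB defines follow the usual pack-a-field-into-a-u32 convention. An illustrative encoding of an enabled RTS/CTS profile using only the constants introduced above:

	u32 rtscts = 0;

	rtscts |= (WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB) &
		  WMI_RTSCTS_SET_MASK;
	rtscts |= (WMI_RTSCTS_ACROSS_SW_RETRIES << WMI_RTSCTS_PROFILE_LSB) &
		  WMI_RTSCTS_PROFILE_MASK;
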
enum wmi_beacon_gen_mode {
WMI_BEACON_STAGGERED_MODE = 0,
WMI_BEACON_BURST_MODE = 1
@@ -2295,9 +2323,9 @@ struct wmi_pdev_param_map {
#define WMI_PDEV_PARAM_UNSUPPORTED 0
enum wmi_pdev_param {
- /* TX chian mask */
+ /* TX chain mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
- /* RX chian mask */
+ /* RX chain mask */
WMI_PDEV_PARAM_RX_CHAIN_MASK,
/* TX power limit for 2G Radio */
WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
/* wal pdev resets */
__le32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
__le32 phy_underrun;
/* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
WMI_REQUEST_AP_STAT = 0x02
};
+struct wlan_inst_rssi_args {
+ __le16 cfg_retry_count;
+ __le16 retry_count;
+};
+
struct wmi_request_stats_cmd {
__le32 stats_id;
- /*
- * Space to add parameters like
- * peer mac addr
- */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* Instantaneous RSSI arguments */
+ struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
/* Suspend option */
@@ -2795,7 +2834,18 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
-struct wmi_pdev_stats {
+struct wmi_pdev_stats_old {
+ __le32 chan_nf; /* Channel noise floor */
+ __le32 tx_frame_count; /* TX frame count */
+ __le32 rx_frame_count; /* RX frame count */
+ __le32 rx_clear_count; /* rx clear count */
+ __le32 cycle_count; /* cycle count */
+ __le32 phy_err_count; /* Phy error count */
+ __le32 chan_tx_pwr; /* channel tx power */
+ struct wal_dbg_stats wal; /* WAL dbg stats */
+} __packed;
+
+struct wmi_pdev_stats_10x {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
@@ -2804,6 +2854,12 @@ struct wmi_pdev_stats {
__le32 phy_err_count; /* Phy error count */
__le32 chan_tx_pwr; /* channel tx power */
struct wal_dbg_stats wal; /* WAL dbg stats */
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+ __le32 fcs_bad;
+ __le32 no_beacons;
+ __le32 mib_int_count;
} __packed;
/*
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
-struct wmi_peer_stats {
+struct wmi_peer_stats_old {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_peer_stats_10x {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
+ __le32 peer_rx_rate;
} __packed;
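
Splitting wmi_pdev_stats/wmi_peer_stats into _old and _10x variants lets the parser pick a layout per firmware flavour; the 10.x structs only append fields, so the common prefix stays binary-compatible. A hedged sketch of how a stats handler might select the layout (the actual parser is not part of this hunk):

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		const struct wmi_peer_stats_10x *st = (const void *)data;
		/* peer_rx_rate is only valid in the 10.x layout */
	} else {
		const struct wmi_peer_stats_old *st = (const void *)data;
		/* tail fields absent; read only the common prefix */
	}
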
struct wmi_vdev_create_cmd {
@@ -4196,13 +4259,14 @@ void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_connect(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *);
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g);
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a2973b7acf2..0fce1c76638e 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
AR5K_TPC);
} else {
- ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
- AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+ ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
+ AR5K_PHY_TXPOWER_RATE_MAX);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index e39e5860a2e9..9c125ff083f7 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,11 +1,19 @@
config ATH6KL
tristate "Atheros mobile chipsets support"
+ depends on CFG80211
+ ---help---
+ This module adds core support for wireless adapters based on
+ Atheros AR6003 and AR6004 chipsets. You still need separate
+ bus drivers for USB and SDIO to be able to use real devices.
+
+ If you choose to build it as a module, it will be called
+ ath6kl_core. Please note that AR6002 and AR6001 are not
+ supported by this driver.
config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support"
depends on ATH6KL
depends on MMC
- depends on CFG80211
---help---
This module adds support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
tristate "Atheros ath6kl USB support"
depends on ATH6KL
depends on USB
- depends on CFG80211
---help---
This module adds support for wireless adapters based on
- Atheros AR6004 chipset running over USB. This is still under
- implementation and it isn't functional. If you choose to
- build it as a module, it will be called ath6kl_usb.
+ Atheros AR6004 chipset and chipsets based on it running over
+ USB. If you choose to build it as a module, it will be
+ called ath6kl_usb.
config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
depends on ATH6KL
---help---
- Enables debug support
+ Enables ath6kl debug support, including debug messages
+ enabled with debug_mask module parameter and debugfs
+ interface.
+
+ If unsure, say Y to make it easier to debug problems.
config ATH6KL_TRACING
bool "Atheros ath6kl tracing support"
depends on ATH6KL
depends on EVENT_TRACING
---help---
- Select this to ath6kl use tracing infrastructure.
+ Select this to let ath6kl use the tracing infrastructure, which
+ can be enabled with the help of, for example, trace-cmd. All
+ debug messages and commands are delivered using individually
+ enablable trace points.
If unsure, say Y to make it easier to debug problems.
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
Enabling this makes it possible to change the regdomain in
the firmware. This can be only enabled if regulatory requirements
are taken into account.
+
+ If unsure, say N.
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index c2c6f4604958..0e26f4a34fda 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"added bss %pM to cfg80211\n", bssid);
kfree(ie);
- } else
+ } else {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
+ }
return bss;
}
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
ssid_list[i].flag,
ssid_list[i].ssid.ssid_len,
ssid_list[i].ssid.ssid);
-
}
/* Make sure no old entries are left behind */
@@ -1759,7 +1759,7 @@ static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
}
static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
/* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) {
-
/*
* Convert given nl80211 specific mask value to equivalent
* driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
ath6kl_set_cipher(vif, 0, true);
- } else if (info->crypto.n_ciphers_pairwise == 1)
+ } else if (info->crypto.n_ciphers_pairwise == 1) {
ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
+ }
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
if (info->inactivity_timeout) {
-
inactivity_timeout = info->inactivity_timeout;
if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
@@ -2975,7 +2974,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac)
+ const u8 *mac)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -2986,7 +2985,8 @@ static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
}
static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_parameters *params)
+ const u8 *mac,
+ struct station_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4b46adbe8c92..b0b652042760 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
-MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \
- "polling. This also specifies the polling interval in" \
- "msecs. Set reocvery_enable for this to be effective");
+MODULE_PARM_DESC(heart_beat_poll,
+ "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
+
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index dbfd17d0a5fa..55c4064dd506 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_enable_reg)
{
-
ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
"GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
}
-
}
if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
-
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
-
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[100];
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index ca9ba005f287..e194c10d9f00 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg)
{
-
}
+
static inline void dump_cred_dist_stats(struct htc_target *target)
{
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index fea7709b5dda..18c070850a09 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
buf = req->virt_dma_buf;
for (i = 0; i < req->scat_entries; i++) {
-
if (from_dma)
memcpy(req->scat_list[i].buf, buf,
req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3]));
}
-
}
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
fail_setup:
return status;
-
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 61f6b21fb0ae..dc6bd8cd9b83 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -197,9 +197,9 @@ struct hif_scatter_req {
/* bounce buffer for upper layers to copy to/from */
u8 *virt_dma_buf;
- struct hif_scatter_item scat_list[1];
-
u32 scat_q_depth;
+
+ struct hif_scatter_item scat_list[0];
};
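
Turning scat_list[1] into a zero-length trailing array changes the allocation arithmetic: sizeof(*s_req) no longer includes one built-in element, which is why the sdio.c sizing later in this patch drops its "n - 1" correction. The matching allocation, as the patch leaves it (the allocation call itself is outside the hunk and assumed to be kzalloc):

	scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;
	s_req = kzalloc(scat_req_sz, GFP_KERNEL);	/* header + n entries */
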
struct ath6kl_irq_proc_registers {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 65e5b719093d..e481f14b9878 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
if (cur_ep_dist->endpoint == ENDPOINT_0)
continue;
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
- else {
+ } else {
/*
* For the remaining data endpoints, we assume that
* each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count;
-
}
ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
enum htc_endpoint_id eid, unsigned int len,
int *req_cred)
{
-
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
unsigned int len;
while (true) {
-
flags = 0;
if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
-
if (list_empty(&endpoint->txq))
break;
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
list_add_tail(&packet->list, &container);
htc_tx_complete(endpoint, &container);
}
-
}
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
ep_cb = ep->ep_cb;
for (j = 0; j < n_msg; j++) {
-
/*
* Reset flag, any packets allocated using the
* rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
}
}
- if (list_empty(&ep->rx_bufq))
+ if (list_empty(&ep->rx_bufq)) {
packet = NULL;
- else {
+ } else {
packet = list_first_entry(&ep->rx_bufq,
struct htc_packet, list);
list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
spin_lock_bh(&target->rx_lock);
for (i = 0; i < msg; i++) {
-
htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
lk_ahd = (struct htc_lookahead_report *) record_buf;
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
next_lk_ahds) {
-
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
}
return 0;
-
}
static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
status = 0;
while (len > 0) {
-
if (len < sizeof(struct htc_record_hdr)) {
status = -ENOMEM;
break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
}
if (!fetched_pkts) {
-
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
look_aheads[0] = msg_look_ahead;
while (true) {
-
/*
* First lookahead sets the expected endpoint IDs for all
* packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
packet->buf = packet->buf_start;
packet->endpoint = ENDPOINT_0;
list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
- } else
+ } else {
list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
}
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 67aa924ed8b3..756fe52a12c8 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
credits_required = 0;
} else {
-
if (ep->cred_dist.credits < credits_required)
break;
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* queue this packet into the caller's queue */
list_add_tail(&packet->list, queue);
}
-
}
static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
list_add(&packet->list, pkt_queue);
break;
}
-
}
if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
*/
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
-
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: Indicat overflowed TX pkts: %p\n",
__func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_move_tail(&packet->list,
&send_queue);
}
-
}
if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
* enough transmit resources.
*/
while (true) {
-
if (get_queue_depth(&ep->txq) == 0)
break;
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
}
spin_lock_bh(&target->tx_lock);
-
}
+
/* done with this endpoint, we can clear the count */
ep->tx_proc_cnt = 0;
spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
dev_kfree_skb(skb);
return status;
-
}
static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
tx_alloc = 0;
} else {
-
tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
if (tx_alloc == 0) {
status = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 4f316bdcbab5..d5ef211f261c 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
if (board_ext_address &&
ar->fw_board_len == (board_data_size + board_ext_data_size)) {
-
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
"writing extended board data to 0x%x (%d B)\n",
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5839fc23bdc7..d56554674da4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
{
-
struct ath6kl *ar = vif->ar;
vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
{
-
struct ath6kl_vif *vif;
int res = 0;
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
- } else
+ } else {
ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
-
+ }
}
static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
- } else
+ } else {
netif_carrier_off(dev);
+ }
return 0;
}
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
dev->features = features | NETIF_F_RXCSUM;
return err;
}
-
}
return err;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 7126bdd4236c..339d89f14d32 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf;
- scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+ scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat)
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
memcpy(tbuf, buf, len);
bounced = true;
- } else
+ } else {
tbuf = buf;
+ }
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
struct bus_request *req)
{
- if (req->scat_req)
+ if (req->scat_req) {
ath6kl_sdio_scat_rw(ar_sdio, req);
- else {
+ } else {
void *context;
int status;
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
list_add_tail(&s_req->list, &ar_sdio->scat_req);
spin_unlock_bh(&ar_sdio->scat_lock);
-
}
/* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
- if (request & HIF_SYNCHRONOUS)
+ if (request & HIF_SYNCHRONOUS) {
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
- else {
+ } else {
spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
(!ar->suspend_mode && wow)) {
-
ret = ath6kl_set_sdio_pm_caps(ar);
if (ret)
goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode || try_deepsleep) {
-
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
/*
* Hit the credit counter with a 4-byte access, the first byte
* read will hit the counter and cause a decrement, while the
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a580a629a0da..d5eeeae7711b 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -289,7 +289,7 @@ struct host_interest {
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
- u32 hi_test_apps_related ; /* 0xdc */
+ u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index ebb24045a8ae..40432fe7a5d2 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
spin_unlock_bh(&conn->psq_lock);
return false;
- } else if (!conn->apsd_info)
+ } else if (!conn->apsd_info) {
return false;
+ }
if (test_bit(WMM_ENABLED, &vif->flags)) {
ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
cookie = NULL;
ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
skb, skb->len);
- } else
+ } else {
cookie = ath6kl_alloc_cookie(ar);
+ }
if (cookie == NULL) {
spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
- u8 ac = 99 ; /* initialize to unmapped ac */
+ u8 ac = 99; /* initialize to unmapped ac */
bool chk_adhoc_ps_mapping = false;
int ret;
struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
if (ret)
goto fail_tx;
}
- } else
+ } else {
goto fail_tx;
+ }
spin_lock_bh(&ar->lock);
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
/* reap completed packets */
while (!list_empty(packet_queue)) {
-
packet = list_first_entry(packet_queue, struct htc_packet,
list);
list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
else
skb_queue_tail(&rxtid->q, node->skb);
node->skb = NULL;
- } else
+ } else {
stats->num_hole++;
+ }
rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
return is_queued;
spin_lock_bh(&rxtid->lock);
- for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
if (rxtid->hold_q[idx].skb) {
/*
* There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
is_apsdq_empty_at_start = is_apsdq_empty;
while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
-
spin_lock_bh(&conn->psq_lock);
skb = skb_dequeue(&conn->apsdq);
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!conn)
return;
aggr_conn = conn->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
is_amsdu, skb)) {
/* aggregation code will handle the skb */
return;
}
- } else if (!is_broadcast_ether_addr(datap->h_dest))
+ } else if (!is_broadcast_ether_addr(datap->h_dest)) {
vif->net_stats.multicast++;
+ }
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (!aggr_conn)
return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
}
-
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (!aggr_conn)
return;
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 56c3fd5cef65..3afc5a463d06 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
break;
kfree(urb_context);
}
-
}
static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
-
}
static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 8b4ce28e3ce8..4d7f9e4712e9 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
sizeof(struct ath6kl_llc_snap_hdr),
layer2_priority);
- } else
+ } else {
usr_pri = layer2_priority & 0x7;
+ }
/*
* Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
sizeof(u32));
skb_pull(skb, hdr_size);
- } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+ } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+ }
datap = skb->data;
llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
{
-
struct ath6kl_wmi_regdomain *ev;
struct country_code_to_enum_rd *country = NULL;
struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
ev = (struct ath6kl_wmi_regdomain *) datap;
reg_code = le32_to_cpu(ev->reg_code);
- if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+ if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
country = ath6kl_regd_find_country((u16) reg_code);
- else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
-
+ } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
-
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
* for delete qos stream from AP
*/
else if (reply->cac_indication == CAC_INDICATION_DELETE) {
-
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2322,7 +2320,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
return ret;
}
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk)
{
struct sk_buff *skb;
struct wmi_add_krk_cmd *cmd;
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
-
if (WARN_ON(!data_sync_bufs[index].skb))
goto free_data_skb;
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
for (i = 0; i < WMM_NUM_AC; i++) {
if (stream_exist & (1 << i)) {
-
/*
* FIXME: Is this lock & unlock inside
* for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
cmd->asleep = cpu_to_le32(1);
- } else
+ } else {
cmd->awake = cpu_to_le32(1);
+ }
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
WMI_SET_HOST_SLEEP_MODE_CMDID,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 5c702ae4d9f8..bb23fc00111d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
* flags here
*/
enum wmi_scan_ctrl_flags_bits {
-
/* set if can scan in the connect cmd */
CONNECT_SCAN_CTRL_FLAGS = 0x01,
@@ -2617,7 +2616,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk);
int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set);
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 8e1c7b0fe76c..8fcd586d1c39 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -53,7 +53,8 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o \
common-init.o \
- common-beacon.o
+ common-beacon.o \
+ common-debug.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 0a6163e9248c..c38399bc9aa9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f76139bbb74f..2c42ff05efa3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0ac8be96097f..2154efcd3900 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index a01f0edb6518..b995ffe88b33 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x210d0401},
- {0x0000a3a0, 0xab9a7144},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 3c9113d9b1bc..8e5c3b9786e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x1f020503},
- {0x0000a39c, 0x29180c03},
- {0x0000a3a0, 0x9a8b6844},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x000000ff},
{0x0000a3a8, 0x6a6a6a6a},
{0x0000a3ac, 0x6a6a6a6a},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index e6aec2c0207f..a5ca65240af3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
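
Each of these initvals hunks edits a {register offset, value} row in a per-chip baseband table: register 0x00009e40 drops from 0x0d261820 to 0x0d261800 across AR9300/AR9331/AR9340/AR9580, and AR9340/QCA953x additionally get new values for the 0x0000a398..0x0000a3a0 triplet. A minimal sketch of how such a [][2] table is consumed, assuming a hypothetical helper (the real driver routes these tables through its INI-array machinery):

    /* Hypothetical sketch: apply a [][2] initvals table by writing each
     * value to its register offset in order. */
    static void apply_initvals(struct ath_hw *ah,
                               const u32 tbl[][2], int rows)
    {
            int i;

            for (i = 0; i < rows; i++)
                    REG_WRITE(ah, tbl[i][0], tbl[i][1]);
    }
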
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3ba03dde4215..2ca8f7e06174 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -23,8 +23,8 @@
#include <linux/leds.h>
#include <linux/completion.h>
-#include "debug.h"
#include "common.h"
+#include "debug.h"
#include "mci.h"
#include "dfs.h"
#include "spectral.h"
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
+/* Stop tx traffic 1ms before the GO goes away */
+#define ATH_P2P_PS_STOP_TIME 1000
+
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
#define IEEE80211_WEP_IVLEN 3
@@ -271,6 +274,7 @@ struct ath_node {
#ifdef CONFIG_ATH9K_STATION_STATISTICS
struct ath_rx_rate_stats rx_rate_stats;
#endif
+ u8 key_idx[4];
};
struct ath_tx_control {
@@ -366,11 +370,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
/********/
struct ath_vif {
+ struct ieee80211_vif *vif;
struct ath_node mcast_node;
int av_bslot;
bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
struct ath_buf *av_bcbuf;
+
+ /* P2P Client */
+ struct ieee80211_noa_data noa;
};
struct ath9k_vif_iter_data {
@@ -463,6 +471,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
void ath_ps_full_sleep(unsigned long data);
+void ath9k_p2p_ps_timer(void *priv);
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
/**********/
/* BTCOEX */
@@ -713,6 +723,9 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
+ struct ath_gen_timer *p2p_ps_timer;
+ struct ath_vif *p2p_ps_vif;
+
unsigned long driver_data;
u8 gtt_cnt;
@@ -757,6 +770,7 @@ struct ath_softc {
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
struct dfs_pattern_detector *dfs_detector;
+ u64 dfs_prev_pulse_ts;
u32 wow_enabled;
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index bd9e634879e6..e387f0b2954a 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -537,8 +537,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->dtim_count = 1;
cur_conf->ibss_creator = bss_conf->ibss_creator;
- cur_conf->bmiss_timeout =
- ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
/*
* It looks like mac80211 may end up using beacon interval of zero in
@@ -549,6 +547,9 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
if (cur_conf->beacon_interval == 0)
cur_conf->beacon_interval = 100;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
/*
* We don't parse dtim period from mac80211 during the driver
* initialization as it breaks association with hidden-ssid
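
The point of this hunk is ordering: bmiss_timeout used to be derived from beacon_interval before the zero-interval fallback ran, so a zero interval from mac80211 produced a zero beacon-miss timeout. Computing it after the sanity check keeps the timeout proportional to the effective interval; condensed restatement of the fixed ordering:

    if (cur_conf->beacon_interval == 0)
            cur_conf->beacon_interval = 100;   /* fallback, in TUs */

    cur_conf->bmiss_timeout =
            ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
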
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
new file mode 100644
index 000000000000..3b289f933405
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_hw *ah = file->private_data;
+ u32 len = 0, size = 6000;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_modal_eeprom = {
+ .read = read_file_modal_eeprom,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+ debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah,
+ &fops_modal_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom);
+
+static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_hw *ah = file->private_data;
+ u32 len = 0, size = 1500;
+ ssize_t retval = 0;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_base_eeprom = {
+ .read = read_file_base_eeprom,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+ debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah,
+ &fops_base_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom);
+
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs)
+{
+#define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++
+#define RX_CMN_STAT_INC(c) (rxstats->c++)
+
+ RX_CMN_STAT_INC(rx_pkts_all);
+ rxstats->rx_bytes_all += rs->rs_datalen;
+
+ if (rs->rs_status & ATH9K_RXERR_CRC)
+ RX_CMN_STAT_INC(crc_err);
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
+ RX_CMN_STAT_INC(decrypt_crc_err);
+ if (rs->rs_status & ATH9K_RXERR_MIC)
+ RX_CMN_STAT_INC(mic_err);
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_CMN_STAT_INC(pre_delim_crc_err);
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_CMN_STAT_INC(post_delim_crc_err);
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_CMN_STAT_INC(decrypt_busy_err);
+
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
+ RX_CMN_STAT_INC(phy_err);
+ if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
+ RX_PHY_ERR_INC(rs->rs_phyerr);
+ }
+
+#undef RX_CMN_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define RXS_ERR(s, e) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%18s : %10u\n", s, \
+ rxstats->e); \
+ } while (0)
+
+ struct ath_rx_stats *rxstats = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ RXS_ERR("PKTS-ALL", rx_pkts_all);
+ RXS_ERR("BYTES-ALL", rx_bytes_all);
+ RXS_ERR("BEACONS", rx_beacons);
+ RXS_ERR("FRAGS", rx_frags);
+ RXS_ERR("SPECTRAL", rx_spectral);
+
+ RXS_ERR("CRC ERR", crc_err);
+ RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
+ RXS_ERR("PHY ERR", phy_err);
+ RXS_ERR("MIC ERR", mic_err);
+ RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
+ RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
+ RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
+ RXS_ERR("LENGTH-ERR", rx_len_err);
+ RXS_ERR("OOM-ERR", rx_oom_err);
+ RXS_ERR("RATE-ERR", rx_rate_err);
+ RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef RXS_ERR
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+ debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats,
+ &fops_recv);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_recv);
+
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ rxstats->phy_err_stats[p]);
+
+ struct ath_rx_stats *rxstats = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+static const struct file_operations fops_phy_err = {
+ .read = read_file_phy_err,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+ debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats,
+ &fops_phy_err);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_phy_err);
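
This new file hoists the RX-stats and EEPROM debugfs helpers out of ath9k's debug.c so that ath9k_htc can reuse them; the removals further down in debug.c and htc_drv_debug.c are the other half of the move. Every read handler follows the same pattern: format into a kzalloc'd buffer, clamp len to the buffer size, and let simple_read_from_buffer() handle offset and count bookkeeping. A minimal sketch of wiring the exported helpers into a device's debugfs directory, mirroring the ath9k_init_debug() changes below:

    /* Hypothetical caller: register the shared debugfs entries. */
    static void example_register_debugfs(struct dentry *dir,
                                         struct ath_hw *ah,
                                         struct ath_rx_stats *rxstats)
    {
            ath9k_cmn_debug_base_eeprom(dir, ah);
            ath9k_cmn_debug_modal_eeprom(dir, ah);
            ath9k_cmn_debug_recv(dir, rxstats);
            ath9k_cmn_debug_phy_err(dir, rxstats);
    }
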
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
new file mode 100644
index 000000000000..7c9788490f7f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @rx_pkts_all: No. of total frames received, including ones that
+ may have had errors.
+ * @rx_bytes_all: No. of total bytes received, including ones that
+ may have had errors.
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ * @rx_len_err: No. of frames discarded due to bad length.
+ * @rx_oom_err: No. of frames dropped due to OOM issues.
+ * @rx_rate_err: No. of frames dropped due to rate errors.
+ * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
+ * @rx_beacons: No. of beacons received.
+ * @rx_frags: No. of rx-fragments received.
+ * @rx_spectral: No. of spectral packets received.
+ */
+struct ath_rx_stats {
+ u32 rx_pkts_all;
+ u32 rx_bytes_all;
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+ u32 rx_len_err;
+ u32 rx_oom_err;
+ u32 rx_rate_err;
+ u32 rx_too_many_frags_err;
+ u32 rx_beacons;
+ u32 rx_frags;
+ u32 rx_spectral;
+};
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah);
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah);
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs);
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats);
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ca38116838f0..ffc454b18637 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,6 +23,7 @@
#include "common-init.h"
#include "common-beacon.h"
+#include "common-debug.h"
/* Common header for Atheros 802.11n base driver cores */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 780ff1bee6f6..6cc42be48d4e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -948,151 +948,11 @@ static const struct file_operations fops_reset = {
.llseek = default_llseek,
};
-static ssize_t read_file_recv(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define RXS_ERR(s, e) \
- do { \
- len += scnprintf(buf + len, size - len, \
- "%18s : %10u\n", s, \
- sc->debug.stats.rxstats.e);\
- } while (0)
-
- struct ath_softc *sc = file->private_data;
- char *buf;
- unsigned int len = 0, size = 1600;
- ssize_t retval = 0;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- RXS_ERR("PKTS-ALL", rx_pkts_all);
- RXS_ERR("BYTES-ALL", rx_bytes_all);
- RXS_ERR("BEACONS", rx_beacons);
- RXS_ERR("FRAGS", rx_frags);
- RXS_ERR("SPECTRAL", rx_spectral);
-
- RXS_ERR("CRC ERR", crc_err);
- RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
- RXS_ERR("PHY ERR", phy_err);
- RXS_ERR("MIC ERR", mic_err);
- RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
- RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
- RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
- RXS_ERR("LENGTH-ERR", rx_len_err);
- RXS_ERR("OOM-ERR", rx_oom_err);
- RXS_ERR("RATE-ERR", rx_rate_err);
- RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef RXS_ERR
-}
-
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
-#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
-
- RX_STAT_INC(rx_pkts_all);
- sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
-
- if (rs->rs_status & ATH9K_RXERR_CRC)
- RX_STAT_INC(crc_err);
- if (rs->rs_status & ATH9K_RXERR_DECRYPT)
- RX_STAT_INC(decrypt_crc_err);
- if (rs->rs_status & ATH9K_RXERR_MIC)
- RX_STAT_INC(mic_err);
- if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
- RX_STAT_INC(pre_delim_crc_err);
- if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
- RX_STAT_INC(post_delim_crc_err);
- if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
- RX_STAT_INC(decrypt_busy_err);
-
- if (rs->rs_status & ATH9K_RXERR_PHY) {
- RX_STAT_INC(phy_err);
- if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
- RX_PHY_ERR_INC(rs->rs_phyerr);
- }
-
-#undef RX_PHY_ERR_INC
+ ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
}
-static const struct file_operations fops_recv = {
- .read = read_file_recv,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PHY_ERR(s, p) \
- len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
-
- struct ath_softc *sc = file->private_data;
- char *buf;
- unsigned int len = 0, size = 1600;
- ssize_t retval = 0;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PHY_ERR
-}
-
-static const struct file_operations fops_phy_err = {
- .read = read_file_phy_err,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1268,62 +1128,6 @@ static const struct file_operations fops_dump_nfcal = {
.llseek = default_llseek,
};
-static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- u32 len = 0, size = 1500;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_base_eeprom = {
- .read = read_file_base_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- u32 len = 0, size = 6000;
- char *buf;
- size_t retval;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_modal_eeprom = {
- .read = read_file_modal_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1524,10 +1328,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_misc);
debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_reset);
- debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_recv);
- debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_phy_err);
+
+ ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+ ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+
debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
&ah->rxchainmask);
debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1547,10 +1351,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_regdump);
debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dump_nfcal);
- debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_base_eeprom);
- debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_modal_eeprom);
+
+ ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+ ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 559a68c2709c..53ae15bd0c9d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -221,50 +221,6 @@ struct ath_rx_rate_stats {
} cck_stats[4];
};
-/**
- * struct ath_rx_stats - RX Statistics
- * @rx_pkts_all: No. of total frames received, including ones that
- may have had errors.
- * @rx_bytes_all: No. of total bytes received, including ones that
- may have had errors.
- * @crc_err: No. of frames with incorrect CRC value
- * @decrypt_crc_err: No. of frames whose CRC check failed after
- decryption process completed
- * @phy_err: No. of frames whose reception failed because the PHY
- encountered an error
- * @mic_err: No. of frames with incorrect TKIP MIC verification failure
- * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
- * @post_delim_crc_err: Post-Frame delimiter CRC error detections
- * @decrypt_busy_err: Decryption interruptions counter
- * @phy_err_stats: Individual PHY error statistics
- * @rx_len_err: No. of frames discarded due to bad length.
- * @rx_oom_err: No. of frames dropped due to OOM issues.
- * @rx_rate_err: No. of frames dropped due to rate errors.
- * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
- * @rx_beacons: No. of beacons received.
- * @rx_frags: No. of rx-fragements received.
- * @rx_spectral: No of spectral packets received.
- */
-struct ath_rx_stats {
- u32 rx_pkts_all;
- u32 rx_bytes_all;
- u32 crc_err;
- u32 decrypt_crc_err;
- u32 phy_err;
- u32 mic_err;
- u32 pre_delim_crc_err;
- u32 post_delim_crc_err;
- u32 decrypt_busy_err;
- u32 phy_err_stats[ATH9K_PHYERR_MAX];
- u32 rx_len_err;
- u32 rx_oom_err;
- u32 rx_rate_err;
- u32 rx_too_many_frags_err;
- u32 rx_beacons;
- u32 rx_frags;
- u32 rx_spectral;
-};
-
#define ANT_MAIN 0
#define ANT_ALT 1
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 857bb28b3894..726271c7c330 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -178,12 +178,12 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
pe.ts = mactime;
if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
struct dfs_pattern_detector *pd = sc->dfs_detector;
- static u64 last_ts;
ath_dbg(common, DFS,
"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
- pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
- last_ts = pe.ts;
+ pe.freq, pe.ts, pe.width, pe.rssi,
+ pe.ts - sc->dfs_prev_pulse_ts);
+ sc->dfs_prev_pulse_ts = pe.ts;
DFS_STAT_INC(sc, pulses_processed);
if (pd != NULL && pd->add_pulse(pd, &pe)) {
DFS_STAT_INC(sc, radar_detected);
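
Replacing the function-static last_ts with sc->dfs_prev_pulse_ts makes the pulse-delta debug output per device: a function-static is a single variable shared by every NIC the module drives, so two adapters reporting radar pulses would corrupt each other's deltas. A sketch of the bug class, with hypothetical names:

    /* One copy for every device serviced by the module -- wrong as soon
     * as more than one NIC is present. */
    static u64 last_ts;

    /* One copy per device, as the hunk does with dfs_prev_pulse_ts. */
    struct example_softc {
            u64 dfs_prev_pulse_ts;
    };
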
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index dab1f0cab993..09a5d72f3ff5 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -325,14 +325,14 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
- struct ath_htc_rx_status *rxs);
+ struct ath_rx_status *rs);
struct ath_tx_stats {
u32 buf_queued;
@@ -345,25 +345,18 @@ struct ath_tx_stats {
u32 queue_stats[IEEE80211_NUM_ACS];
};
-struct ath_rx_stats {
+struct ath_skbrx_stats {
u32 skb_allocated;
u32 skb_completed;
u32 skb_completed_bytes;
u32 skb_dropped;
- u32 err_crc;
- u32 err_decrypt_crc;
- u32 err_mic;
- u32 err_pre_delim;
- u32 err_post_delim;
- u32 err_decrypt_busy;
- u32 err_phy;
- u32 err_phy_stats[ATH9K_PHYERR_MAX];
};
struct ath9k_debug {
struct dentry *debugfs_phy;
struct ath_tx_stats tx_stats;
struct ath_rx_stats rx_stats;
+ struct ath_skbrx_stats skbrx_stats;
};
void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
@@ -385,7 +378,7 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
#define TX_QSTAT_INC(c) do { } while (0)
static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
- struct ath_htc_rx_status *rxs)
+ struct ath_rx_status *rs)
{
}
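
Renaming the HTC skb-accounting struct to ath_skbrx_stats frees the ath_rx_stats name for the shared definition now living in common-debug.h, and ath9k_htc_err_stat_rx() switches from the raw on-wire ath_htc_rx_status to the converted ath_rx_status that the shared helper expects. The resulting split, condensed from the hunks above:

    struct ath9k_debug {
            struct dentry *debugfs_phy;
            struct ath_tx_stats tx_stats;
            struct ath_rx_stats rx_stats;       /* shared hw RX counters */
            struct ath_skbrx_stats skbrx_stats; /* HTC-local skb stats */
    };
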
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index fb071ee4fcfb..8b529e4b8ac4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -243,39 +243,14 @@ static const struct file_operations fops_xmit = {
};
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
- struct ath_htc_rx_status *rxs)
+ struct ath_rx_status *rs)
{
-#define RX_PHY_ERR_INC(c) priv->debug.rx_stats.err_phy_stats[c]++
-
- if (rxs->rs_status & ATH9K_RXERR_CRC)
- priv->debug.rx_stats.err_crc++;
- if (rxs->rs_status & ATH9K_RXERR_DECRYPT)
- priv->debug.rx_stats.err_decrypt_crc++;
- if (rxs->rs_status & ATH9K_RXERR_MIC)
- priv->debug.rx_stats.err_mic++;
- if (rxs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
- priv->debug.rx_stats.err_pre_delim++;
- if (rxs->rs_status & ATH9K_RX_DELIM_CRC_POST)
- priv->debug.rx_stats.err_post_delim++;
- if (rxs->rs_status & ATH9K_RX_DECRYPT_BUSY)
- priv->debug.rx_stats.err_decrypt_busy++;
-
- if (rxs->rs_status & ATH9K_RXERR_PHY) {
- priv->debug.rx_stats.err_phy++;
- if (rxs->rs_phyerr < ATH9K_PHYERR_MAX)
- RX_PHY_ERR_INC(rxs->rs_phyerr);
- }
-
-#undef RX_PHY_ERR_INC
+ ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs);
}
-static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
-#define PHY_ERR(s, p) \
- len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
- priv->debug.rx_stats.err_phy_stats[p]);
-
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
unsigned int len = 0, size = 1500;
@@ -287,63 +262,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs allocated",
- priv->debug.rx_stats.skb_allocated);
+ priv->debug.skbrx_stats.skb_allocated);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs completed",
- priv->debug.rx_stats.skb_completed);
+ priv->debug.skbrx_stats.skb_completed);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs Dropped",
- priv->debug.rx_stats.skb_dropped);
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "CRC ERR",
- priv->debug.rx_stats.err_crc);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT CRC ERR",
- priv->debug.rx_stats.err_decrypt_crc);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "MIC ERR",
- priv->debug.rx_stats.err_mic);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "PRE-DELIM CRC ERR",
- priv->debug.rx_stats.err_pre_delim);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "POST-DELIM CRC ERR",
- priv->debug.rx_stats.err_post_delim);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT BUSY ERR",
- priv->debug.rx_stats.err_decrypt_busy);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "TOTAL PHY ERR",
- priv->debug.rx_stats.err_phy);
-
-
- PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+ priv->debug.skbrx_stats.skb_dropped);
if (len > size)
len = size;
@@ -352,12 +277,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
kfree(buf);
return retval;
-
-#undef PHY_ERR
}
-static const struct file_operations fops_recv = {
- .read = read_file_recv,
+static const struct file_operations fops_skb_rx = {
+ .read = read_file_skb_rx,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@@ -486,423 +409,6 @@ static const struct file_operations fops_debug = {
.llseek = default_llseek,
};
-static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath9k_htc_priv *priv = file->private_data;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct base_eep_header *pBase = NULL;
- unsigned int len = 0, size = 1500;
- ssize_t retval = 0;
- char *buf;
-
- pBase = ath9k_htc_get_eeprom_base(priv);
-
- if (pBase == NULL) {
- ath_err(common, "Unknown EEPROM type\n");
- return 0;
- }
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Major Version",
- pBase->version >> 12);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Minor Version",
- pBase->version & 0xFFF);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Checksum",
- pBase->checksum);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Length",
- pBase->length);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain1",
- pBase->regDmn[0]);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain2",
- pBase->regDmn[1]);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Mask", pBase->txMask);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "RX Mask", pBase->rxMask);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 5GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 2GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Big Endian",
- !!(pBase->eepMisc & 0x01));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Major Ver",
- (pBase->binBuildNumber >> 24) & 0xFF);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Minor Ver",
- (pBase->binBuildNumber >> 16) & 0xFF);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Build",
- (pBase->binBuildNumber >> 8) & 0xFF);
-
- /*
- * UB91 specific data.
- */
- if (AR_SREV_9271(priv->ah)) {
- struct base_eep_header_4k *pBase4k =
- &priv->ah->eeprom.map4k.baseEepHeader;
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Gain type",
- pBase4k->txGainType);
- }
-
- /*
- * UB95 specific data.
- */
- if (priv->ah->hw_version.usbdev == AR9287_USB) {
- struct base_eep_ar9287_header *pBase9287 =
- &priv->ah->eeprom.map9287.baseEepHeader;
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10ddB\n",
- "Power Table Offset",
- pBase9287->pwrTableOffset);
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "OpenLoop Power Ctrl",
- pBase9287->openLoopPwrCntl);
- }
-
- len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_base_eeprom = {
- .read = read_file_base_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_4k_modal_eeprom(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val) \
- do { \
- len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
- _s, (_val)); \
- } while (0)
-
- struct ath9k_htc_priv *priv = file->private_data;
- struct modal_eep_4k_header *pModal = &priv->ah->eeprom.map4k.modalHeader;
- unsigned int len = 0, size = 2048;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
- PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
- PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
- PR_EEP("Switch Settle", pModal->switchSettling);
- PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
- PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
- PR_EEP("ADC Desired size", pModal->adcDesiredSize);
- PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
- PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
- PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
- PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
- PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
- PR_EEP("CCA Threshold)", pModal->thresh62);
- PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
- PR_EEP("xpdGain", pModal->xpdGain);
- PR_EEP("External PD", pModal->xpd);
- PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
- PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
- PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
- PR_EEP("O/D Bias Version", pModal->version);
- PR_EEP("CCK OutputBias", pModal->ob_0);
- PR_EEP("BPSK OutputBias", pModal->ob_1);
- PR_EEP("QPSK OutputBias", pModal->ob_2);
- PR_EEP("16QAM OutputBias", pModal->ob_3);
- PR_EEP("64QAM OutputBias", pModal->ob_4);
- PR_EEP("CCK Driver1_Bias", pModal->db1_0);
- PR_EEP("BPSK Driver1_Bias", pModal->db1_1);
- PR_EEP("QPSK Driver1_Bias", pModal->db1_2);
- PR_EEP("16QAM Driver1_Bias", pModal->db1_3);
- PR_EEP("64QAM Driver1_Bias", pModal->db1_4);
- PR_EEP("CCK Driver2_Bias", pModal->db2_0);
- PR_EEP("BPSK Driver2_Bias", pModal->db2_1);
- PR_EEP("QPSK Driver2_Bias", pModal->db2_2);
- PR_EEP("16QAM Driver2_Bias", pModal->db2_3);
- PR_EEP("64QAM Driver2_Bias", pModal->db2_4);
- PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
- PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
- PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
- PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
- PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
- PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
- PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
- PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
- PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
- PR_EEP("Ant. Diversity ctl1", pModal->antdiv_ctl1);
- PR_EEP("Ant. Diversity ctl2", pModal->antdiv_ctl2);
- PR_EEP("TX Diversity", pModal->tx_diversity);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_def_modal_eeprom(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val) \
- do { \
- if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
- pModal = &priv->ah->eeprom.def.modalHeader[1]; \
- len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
- _s, (_val), "|"); \
- } \
- if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
- pModal = &priv->ah->eeprom.def.modalHeader[0]; \
- len += scnprintf(buf + len, size - len, "%9d\n",\
- (_val)); \
- } \
- } while (0)
-
- struct ath9k_htc_priv *priv = file->private_data;
- struct base_eep_header *pBase = &priv->ah->eeprom.def.baseEepHeader;
- struct modal_eep_header *pModal = NULL;
- unsigned int len = 0, size = 3500;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len,
- "%31s %15s\n", "2G", "5G");
- len += scnprintf(buf + len, size - len,
- "%32s %16s\n", "====", "====\n");
-
- PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
- PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
- PR_EEP("Chain2 Ant. Control", pModal->antCtrlChain[2]);
- PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
- PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
- PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
- PR_EEP("Chain2 Ant. Gain", pModal->antennaGainCh[2]);
- PR_EEP("Switch Settle", pModal->switchSettling);
- PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
- PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
- PR_EEP("Chain2 TxRxAtten", pModal->txRxAttenCh[2]);
- PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
- PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
- PR_EEP("Chain2 RxTxMargin", pModal->rxTxMarginCh[2]);
- PR_EEP("ADC Desired size", pModal->adcDesiredSize);
- PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
- PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
- PR_EEP("Chain1 xlna Gain", pModal->xlnaGainCh[1]);
- PR_EEP("Chain2 xlna Gain", pModal->xlnaGainCh[2]);
- PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
- PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
- PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
- PR_EEP("CCA Threshold)", pModal->thresh62);
- PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
- PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
- PR_EEP("Chain2 NF Threshold", pModal->noiseFloorThreshCh[2]);
- PR_EEP("xpdGain", pModal->xpdGain);
- PR_EEP("External PD", pModal->xpd);
- PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
- PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
- PR_EEP("Chain2 I Coefficient", pModal->iqCalICh[2]);
- PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
- PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
- PR_EEP("Chain2 Q Coefficient", pModal->iqCalQCh[2]);
- PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
- PR_EEP("Chain0 OutputBias", pModal->ob);
- PR_EEP("Chain0 DriverBias", pModal->db);
- PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
- PR_EEP("2chain pwr decrease", pModal->pwrDecreaseFor2Chain);
- PR_EEP("3chain pwr decrease", pModal->pwrDecreaseFor3Chain);
- PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
- PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
- PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
- PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
- PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
- PR_EEP("Chain2 bswAtten", pModal->bswAtten[2]);
- PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
- PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
- PR_EEP("Chain2 bswMargin", pModal->bswMargin[2]);
- PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
- PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
- PR_EEP("Chain1 xatten2Db", pModal->xatten2Db[1]);
- PR_EEP("Chain2 xatten2Db", pModal->xatten2Db[2]);
- PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
- PR_EEP("Chain1 xatten2Margin", pModal->xatten2Margin[1]);
- PR_EEP("Chain2 xatten2Margin", pModal->xatten2Margin[2]);
- PR_EEP("Chain1 OutputBias", pModal->ob_ch1);
- PR_EEP("Chain1 DriverBias", pModal->db_ch1);
- PR_EEP("LNA Control", pModal->lna_ctl);
- PR_EEP("XPA Bias Freq0", pModal->xpaBiasLvlFreq[0]);
- PR_EEP("XPA Bias Freq1", pModal->xpaBiasLvlFreq[1]);
- PR_EEP("XPA Bias Freq2", pModal->xpaBiasLvlFreq[2]);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_9287_modal_eeprom(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val) \
- do { \
- len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
- _s, (_val)); \
- } while (0)
-
- struct ath9k_htc_priv *priv = file->private_data;
- struct modal_eep_ar9287_header *pModal = &priv->ah->eeprom.map9287.modalHeader;
- unsigned int len = 0, size = 3000;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
- PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
- PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
- PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
- PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
- PR_EEP("Switch Settle", pModal->switchSettling);
- PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
- PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
- PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
- PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
- PR_EEP("ADC Desired size", pModal->adcDesiredSize);
- PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
- PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
- PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
- PR_EEP("CCA Threshold)", pModal->thresh62);
- PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
- PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
- PR_EEP("xpdGain", pModal->xpdGain);
- PR_EEP("External PD", pModal->xpd);
- PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
- PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
- PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
- PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
- PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
- PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
- PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
- PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
- PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
- PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
- PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
- PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
- PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
- PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
- PR_EEP("AR92x7 Version", pModal->version);
- PR_EEP("DriverBias1", pModal->db1);
- PR_EEP("DriverBias2", pModal->db1);
- PR_EEP("CCK OutputBias", pModal->ob_cck);
- PR_EEP("PSK OutputBias", pModal->ob_psk);
- PR_EEP("QAM OutputBias", pModal->ob_qam);
- PR_EEP("PAL_OFF OutputBias", pModal->ob_pal_off);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath9k_htc_priv *priv = file->private_data;
-
- if (AR_SREV_9271(priv->ah))
- return read_4k_modal_eeprom(file, user_buf, count, ppos);
- else if (priv->ah->hw_version.usbdev == AR9280_USB)
- return read_def_modal_eeprom(file, user_buf, count, ppos);
- else if (priv->ah->hw_version.usbdev == AR9287_USB)
- return read_9287_modal_eeprom(file, user_buf, count, ppos);
-
- return 0;
-}
-
-static const struct file_operations fops_modal_eeprom = {
- .read = read_file_modal_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -947,6 +453,8 @@ int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
#define STXBASE priv->debug.tx_stats
#define SRXBASE priv->debug.rx_stats
+#define SKBTXBASE priv->debug.tx_stats
+#define SKBRXBASE priv->debug.skbrx_stats
#define ASTXQ(a) \
data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
@@ -960,24 +468,24 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
int i = 0;
- data[i++] = STXBASE.skb_success;
- data[i++] = STXBASE.skb_success_bytes;
- data[i++] = SRXBASE.skb_completed;
- data[i++] = SRXBASE.skb_completed_bytes;
+ data[i++] = SKBTXBASE.skb_success;
+ data[i++] = SKBTXBASE.skb_success_bytes;
+ data[i++] = SKBRXBASE.skb_completed;
+ data[i++] = SKBRXBASE.skb_completed_bytes;
ASTXQ(queue_stats);
- data[i++] = SRXBASE.err_crc;
- data[i++] = SRXBASE.err_decrypt_crc;
- data[i++] = SRXBASE.err_phy;
- data[i++] = SRXBASE.err_mic;
- data[i++] = SRXBASE.err_pre_delim;
- data[i++] = SRXBASE.err_post_delim;
- data[i++] = SRXBASE.err_decrypt_busy;
+ data[i++] = SRXBASE.crc_err;
+ data[i++] = SRXBASE.decrypt_crc_err;
+ data[i++] = SRXBASE.phy_err;
+ data[i++] = SRXBASE.mic_err;
+ data[i++] = SRXBASE.pre_delim_crc_err;
+ data[i++] = SRXBASE.post_delim_crc_err;
+ data[i++] = SRXBASE.decrypt_busy_err;
- data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
- data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
- data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING];
WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
}
@@ -1001,18 +509,21 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv, &fops_tgt_rx_stats);
debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_xmit);
- debugfs_create_file("recv", S_IRUSR, priv->debug.debugfs_phy,
- priv, &fops_recv);
+ debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_skb_rx);
+
+ ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+ ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+
debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_slot);
debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_queue);
debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
priv, &fops_debug);
- debugfs_create_file("base_eeprom", S_IRUSR, priv->debug.debugfs_phy,
- priv, &fops_base_eeprom);
- debugfs_create_file("modal_eeprom", S_IRUSR, priv->debug.debugfs_phy,
- priv, &fops_modal_eeprom);
+
+ ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah);
+ ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 289f3d8924b5..bb86eb2ffc95 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -996,8 +996,6 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
goto rx_next;
}
- ath9k_htc_err_stat_rx(priv, rxstatus);
-
/* Get the RX status information */
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
@@ -1005,6 +1003,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
/* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
* After this, we can drop this part of skb. */
rx_status_htc_to_ath(&rx_stats, rxstatus);
+ ath9k_htc_err_stat_rx(priv, &rx_stats);
rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
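
The call moves because the stats helper now consumes struct ath_rx_status, so the raw HTC RX header is translated first and the converted form is what gets counted. Condensed from the hunk:

    /* Convert first, then account -- the shared helper reads the
     * converted ath_rx_status, not the raw HTC frame header. */
    rx_status_htc_to_ath(&rx_stats, rxstatus);
    ath9k_htc_err_stat_rx(priv, &rx_stats);
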
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c8a9dfab1fee..2a8ed8375ec0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -26,7 +26,6 @@
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"
-#include "debug.h"
#include "ath9k.h"
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
@@ -246,6 +245,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
return;
case AR9300_DEVID_AR953X:
ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+ if (ah->get_mac_revision)
+ ah->hw_version.macRev = ah->get_mac_revision();
return;
}
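
On AR953x the MAC revision is not taken from the usual SREV decoding, so platform code may supply a get_mac_revision callback and ath9k_hw_read_revisions() now honors it for this chip as well. A hypothetical platform-side sketch, assuming the int (*get_mac_revision)(void) signature from ath9k_platform_data:

    /* Hypothetical board code: report the SoC's MAC revision. */
    static int example_get_mac_revision(void)
    {
            return 2;   /* illustrative value only */
    }

    /* ... pdata.get_mac_revision = example_get_mac_revision; */
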
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 36ae6490e554..0246b990fe87 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -61,6 +61,10 @@ static int ath9k_ps_enable;
module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+static int ath9k_use_chanctx;
+module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
+MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
+
bool is_ath9k_unloaded;
#ifdef CONFIG_MAC80211_LEDS
@@ -508,7 +512,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
sc->tx99_power = MAX_RATE_POWER + 1;
init_waitqueue_head(&sc->tx_wait);
- if (!pdata) {
+ if (!pdata || pdata->use_eeprom) {
ah->ah_flags |= AH_USE_EEPROM;
sc->sc_ah->led_pin = -1;
} else {
@@ -589,6 +593,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_btcoex;
+ sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
+ NULL, sc, AR_FIRST_NDP_TIMER);
+
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
ath_fill_led_pin(sc);
@@ -643,17 +650,20 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
}
static const struct ieee80211_iface_limit if_limits[] = {
- { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_WDS) },
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
{ .max = 8, .types =
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
- BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
};
+static const struct ieee80211_iface_limit wds_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
+};
+
static const struct ieee80211_iface_limit if_dfs_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
@@ -670,6 +680,13 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
},
+ {
+ .limits = wds_limits,
+ .n_limits = ARRAY_SIZE(wds_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
#ifdef CONFIG_ATH9K_DFS_CERTIFIED
{
.limits = if_dfs_limits,
@@ -711,19 +728,23 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ hw->wiphy->features |= (NL80211_FEATURE_ACTIVE_MONITOR |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE);
if (!config_enabled(CONFIG_ATH9K_TX99)) {
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
hw->wiphy->iface_combinations = if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (!ath9k_use_chanctx) {
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS);
+ } else
+ hw->wiphy->n_iface_combinations = 1;
}
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
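
WDS moves out of the first interface combination into a dedicated one, presumably because WDS interfaces cannot be driven through the channel-context operation that the new use_chanctx module parameter prepares for; with use_chanctx=1 only the first combination is advertised and the WDS interface mode is withheld. Condensed from the hunk:

    if (!ath9k_use_chanctx) {
            hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
            hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS);
    } else {
            hw->wiphy->n_iface_combinations = 1;   /* STA/AP entry only */
    }
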
@@ -855,6 +876,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
{
int i = 0;
+ if (sc->p2p_ps_timer)
+ ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
+
ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 51ce36f108f9..275205ab5f15 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -958,3 +958,25 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
+
+#define ATH9K_HW_MAX_DCU 10
+#define ATH9K_HW_SLICE_PER_DCU 16
+#define ATH9K_HW_BIT_IN_SLICE 16
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
+{
+ int dcu_idx;
+ u32 filter;
+
+ for (dcu_idx = 0; dcu_idx < ATH9K_HW_MAX_DCU; dcu_idx++) {
+ filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
+ filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
+ filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
+ AR_D_TXBLK_WRITE_SLICE);
+ filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
+ ath_dbg(ath9k_hw_common(ah), PS,
+ "DCU%d staid %d set %d txfilter %08x\n",
+ dcu_idx, destidx, set, filter);
+ REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
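For readers unfamiliar with the SM() helper used above: ath9k defines each register field as a mask plus a matching _S shift constant, and SM(v, FIELD) shifts the value into place and masks it. A minimal standalone sketch, with hypothetical field values (the real AR_D_TXBLK_* masks live in reg.h):

#include <stdint.h>

/* hypothetical field definition, for illustration only */
#define FIELD_WRITE_DCU    0x00000f00
#define FIELD_WRITE_DCU_S  8

#define SM(val, field)  (((val) << field##_S) & (field))
#define MS(reg, field)  (((reg) & (field)) >> field##_S)

int main(void)
{
	uint32_t w = SM(3, FIELD_WRITE_DCU); /* 0x00000300 */

	return MS(w, FIELD_WRITE_DCU) == 3 ? 0 : 1;
}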
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 89df634e81f9..da7686757535 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -729,6 +729,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set);
/* Interrupt Handling */
bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index d69853b848ce..62ac95d6bb9d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
sc->gtt_cnt = 0;
ieee80211_wake_queues(sc->hw);
+ ath9k_p2p_ps_timer(sc);
+
return true;
}
@@ -419,6 +421,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
an->sc = sc;
an->sta = sta;
an->vif = vif;
+ memset(&an->key_idx, 0, sizeof(an->key_idx));
ath_tx_node_init(sc, an);
}
@@ -1119,6 +1122,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
+ avp->vif = vif;
+
an->sc = sc;
an->sta = NULL;
an->vif = vif;
@@ -1163,6 +1168,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
return 0;
}
+static void
+ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ s32 tsf, target_tsf;
+
+ if (!avp || !avp->noa.has_next_tsf)
+ return;
+
+ ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+
+ target_tsf = avp->noa.next_tsf;
+ if (!avp->noa.absent)
+ target_tsf -= ATH_P2P_PS_STOP_TIME;
+
+ if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
+ target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
+
+ ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
+}
+
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1174,6 +1202,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ if (avp == sc->p2p_ps_vif) {
+ sc->p2p_ps_vif = NULL;
+ ath9k_update_p2p_ps_timer(sc, NULL);
+ }
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
sc->nvifs--;
sc->tx99_vif = NULL;
@@ -1427,8 +1462,10 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
return 0;
key = ath_key_config(common, vif, sta, &ps_key);
- if (key > 0)
+ if (key > 0) {
an->ps_key = key;
+ an->key_idx[0] = key;
+ }
return 0;
}
@@ -1446,6 +1483,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
ath_key_delete(common, &ps_key);
an->ps_key = 0;
+ an->key_idx[0] = 0;
}
static int ath9k_sta_remove(struct ieee80211_hw *hw,
@@ -1460,6 +1498,19 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
+ struct ath_node *an,
+ bool set)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (!an->key_idx[i])
+ continue;
+ ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
+ }
+}
+
static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -1472,8 +1523,10 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
case STA_NOTIFY_SLEEP:
an->sleeping = true;
ath_tx_aggr_sleep(sta, sc, an);
+ ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
break;
case STA_NOTIFY_AWAKE:
+ ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
an->sleeping = false;
ath_tx_aggr_wakeup(sc, an);
break;
@@ -1529,7 +1582,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int ret = 0;
+ struct ath_node *an = NULL;
+ int ret = 0, i;
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
@@ -1551,13 +1605,16 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath_dbg(common, CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
+ if (sta)
+ an = (struct ath_node *)sta->drv_priv;
switch (cmd) {
case SET_KEY:
if (sta)
ath9k_del_ps_key(sc, vif, sta);
+ key->hw_key_idx = 0;
ret = ath_key_config(common, vif, sta, key);
if (ret >= 0) {
key->hw_key_idx = ret;
@@ -1570,9 +1627,27 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
+ if (an && key->hw_key_idx) {
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (an->key_idx[i])
+ continue;
+ an->key_idx[i] = key->hw_key_idx;
+ break;
+ }
+ WARN_ON(i == ARRAY_SIZE(an->key_idx));
+ }
break;
case DISABLE_KEY:
ath_key_delete(common, key);
+ if (an) {
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (an->key_idx[i] != key->hw_key_idx)
+ continue;
+ an->key_idx[i] = 0;
+ break;
+ }
+ }
+ key->hw_key_idx = 0;
break;
default:
ret = -EINVAL;
@@ -1636,6 +1711,66 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
ath9k_set_assoc_state(sc, vif);
}
+void ath9k_p2p_ps_timer(void *priv)
+{
+ struct ath_softc *sc = priv;
+ struct ath_vif *avp = sc->p2p_ps_vif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+ u32 tsf;
+
+ if (!avp)
+ return;
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ if (!avp->noa.absent)
+ tsf += ATH_P2P_PS_STOP_TIME;
+
+ if (!avp->noa.has_next_tsf ||
+ avp->noa.next_tsf - tsf > BIT(31))
+ ieee80211_update_p2p_noa(&avp->noa, tsf);
+
+ ath9k_update_p2p_ps_timer(sc, avp);
+
+ rcu_read_lock();
+
+ vif = avp->vif;
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta)
+ goto out;
+
+ an = (void *) sta->drv_priv;
+ if (an->sleeping == !!avp->noa.absent)
+ goto out;
+
+ an->sleeping = avp->noa.absent;
+ if (an->sleeping)
+ ath_tx_aggr_sleep(sta, sc, an);
+ else
+ ath_tx_aggr_wakeup(sc, an);
+
+out:
+ rcu_read_unlock();
+}
+
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ u32 tsf;
+
+ if (!sc->p2p_ps_timer)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+ return;
+
+ sc->p2p_ps_vif = avp;
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
+ ath9k_update_p2p_ps_timer(sc, avp);
+}
+
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1650,6 +1785,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ unsigned long flags;
int slottime;
ath9k_ps_wakeup(sc);
@@ -1710,6 +1846,15 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_P2P_PS) {
+ spin_lock_bh(&sc->sc_pcu_lock);
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (!(sc->ps_flags & PS_BEACON_SYNC))
+ ath9k_update_p2p_ps(sc, vif);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ }
+
if (changed & CHECK_ANI)
ath_check_ani(sc);
@@ -1883,7 +2028,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
return !!npend;
}
-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
@@ -2084,14 +2230,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
-static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_chan_def *chandef)
-{
- /* depend on vif->csa_active only */
- return;
-}
-
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2139,5 +2277,4 @@ struct ieee80211_ops ath9k_ops = {
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
- .channel_switch_beacon = ath9k_channel_switch_beacon,
};
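The P2P NoA code above leans on unsigned 32-bit wraparound for all TSF comparisons: the test avp->noa.next_tsf - tsf > BIT(31) is true exactly when next_tsf already lies in the past, modulo 2^32. A standalone sketch of that convention (the helper name is made up):

#include <stdbool.h>
#include <stdint.h>

/* true iff 'a' is at or after 'b' in modulo-2^32 time, assuming the two
 * timestamps are less than 2^31 apart (hypothetical helper) */
static bool tsf32_after_eq(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (UINT32_C(1) << 31);
}

int main(void)
{
	/* just past a wraparound: 5 is "after" 0xfffffff0 */
	return tsf32_after_eq(5, 0xfffffff0u) ? 0 : 1;
}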
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 914dbc6b1720..4dec09e565ed 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -686,7 +686,7 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath9k_platform_data *pdata = sc->dev->platform_data;
- if (pdata) {
+ if (pdata && !pdata->use_eeprom) {
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: eeprom read failed, offset %08x is out of range\n",
@@ -914,6 +914,7 @@ static int ath_pci_suspend(struct device *device)
*/
ath9k_stop_btcoex(sc);
ath9k_hw_disable(sc->sc_ah);
+ del_timer_sync(&sc->sleep_timer);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
return 0;
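Note how this pci.c hunk and the init.c hunk earlier keep a single invariant in sync: EEPROM data is served from platform data exactly when platform data exists and does not request the real EEPROM, and the AH_USE_EEPROM flag covers the complementary case. A small sketch of the two predicates (struct name shortened for illustration):

#include <stdbool.h>
#include <stddef.h>

struct pdata { bool use_eeprom; };

static bool want_hw_eeprom(const struct pdata *p)    /* init.c condition */
{
	return !p || p->use_eeprom;
}

static bool want_pdata_eeprom(const struct pdata *p) /* pci.c condition */
{
	return p && !p->use_eeprom;
}

/* for every p: want_pdata_eeprom(p) == !want_hw_eeprom(p) */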
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 19df969ec909..9105a92364f7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,7 +34,8 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
+ bool flush)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -59,18 +60,19 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
common->rx_bufsize,
0);
- if (sc->rx.rxlink == NULL)
- ath9k_hw_putrxbuf(ah, bf->bf_daddr);
- else
+ if (sc->rx.rxlink)
*sc->rx.rxlink = bf->bf_daddr;
+ else if (!flush)
+ ath9k_hw_putrxbuf(ah, bf->bf_daddr);
sc->rx.rxlink = &ds->ds_link;
}
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
+ bool flush)
{
if (sc->rx.buf_hold)
- ath_rx_buf_link(sc, sc->rx.buf_hold);
+ ath_rx_buf_link(sc, sc->rx.buf_hold, flush);
sc->rx.buf_hold = bf;
}
@@ -442,7 +444,7 @@ int ath_startrecv(struct ath_softc *sc)
sc->rx.buf_hold = NULL;
sc->rx.rxlink = NULL;
list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
- ath_rx_buf_link(sc, bf);
+ ath_rx_buf_link(sc, bf, false);
}
/* We could have deleted elements so the list may be empty now */
@@ -538,7 +540,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
"Reconfigure beacon timers based on synchronized timestamp\n");
- ath9k_set_beacon(sc);
+ if (!(WARN_ON_ONCE(sc->cur_beacon_conf.beacon_interval == 0)))
+ ath9k_set_beacon(sc);
+ if (sc->p2p_ps_vif)
+ ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
}
if (ath_beacon_dtim_pending_cab(skb)) {
@@ -1115,12 +1120,12 @@ requeue_drop_frag:
requeue:
list_add_tail(&bf->list, &sc->rx.rxbuf);
- if (edma) {
- ath_rx_edma_buf_link(sc, qtype);
- } else {
- ath_rx_buf_relink(sc, bf);
+ if (!edma) {
+ ath_rx_buf_relink(sc, bf, flush);
if (!flush)
ath9k_hw_rxena(ah);
+ } else if (!flush) {
+ ath_rx_edma_buf_link(sc, qtype);
}
if (!budget--)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index b1fd3fa84983..f1bbce3f7774 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -505,9 +505,6 @@
#define AR_D_QCUMASK 0x000003FF
#define AR_D_QCUMASK_RESV0 0xFFFFFC00
-#define AR_D_TXBLK_CMD 0x1038
-#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
-
#define AR_D0_LCL_IFS 0x1040
#define AR_D1_LCL_IFS 0x1044
#define AR_D2_LCL_IFS 0x1048
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4c8cdb097b65..f8ded84b7be8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1707,7 +1707,9 @@ found:
return 0;
}
-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ar9170 *ar = hw->priv;
unsigned int vid;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index ca115f33746f..f35c7f30f9a6 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1076,8 +1076,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
carl9170_set_state(ar, CARL9170_STOPPED);
- return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+ err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+ if (err) {
+ usb_put_dev(udev);
+ usb_put_dev(udev);
+ carl9170_free(ar);
+ }
+ return err;
}
static void carl9170_usb_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index a1a69c5db409..650be79c7ac9 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -73,9 +73,52 @@ static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15,
};
-/* for now, we support ETSI radar types, FCC and JP are TODO */
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ PMIN - PRI_TOLERANCE, \
+ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, \
+}
+
+static const struct radar_detector_specs fcc_radar_ref_types[] = {
+ FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+ FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
+ FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
+ FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
+ FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types fcc_radar_types = {
+ .region = NL80211_DFS_FCC,
+ .num_radar_types = ARRAY_SIZE(fcc_radar_ref_types),
+ .radar_types = fcc_radar_ref_types,
+};
+
+#define JP_PATTERN FCC_PATTERN
+static const struct radar_detector_specs jp_radar_ref_types[] = {
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
+ JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
+ JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types jp_radar_types = {
+ .region = NL80211_DFS_JP,
+ .num_radar_types = ARRAY_SIZE(jp_radar_ref_types),
+ .radar_types = jp_radar_ref_types,
+};
+
static const struct radar_types *dfs_domains[] = {
&etsi_radar_types_v15,
+ &fcc_radar_types,
+ &jp_radar_types,
};
/**
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 7bf0ef8a1f56..63986931829e 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2068,7 +2068,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
if (!msg_ind)
goto nomem;
msg_ind->msg_len = len;
- msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);
if (!msg_ind->msg) {
kfree(msg_ind);
nomem:
@@ -2080,7 +2080,6 @@ nomem:
msg_header->msg_type);
break;
}
- memcpy(msg_ind->msg, buf, len);
mutex_lock(&wcn->hal_ind_mutex);
list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
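The kmemdup() change is purely a simplification: it allocates and copies in one call, replacing the kmalloc()/memcpy() pair and letting the now-redundant memcpy() after the error label be dropped. A userspace sketch of the same idiom:

#include <stdlib.h>
#include <string.h>

/* userspace stand-in for kmemdup(buf, len, GFP_KERNEL) */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}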
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 4806a49cb61b..820d4ebd9322 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -172,7 +172,7 @@ static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
static int wil_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
@@ -288,6 +288,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
wil->scan_request = request;
+ mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
memset(&cmd, 0, sizeof(cmd));
cmd.cmd.num_channels = 0;
@@ -671,7 +672,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
- struct net_device *dev, u8 *mac)
+ struct net_device *dev, const u8 *mac)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ecdabe4adec3..8d4bc4bfb664 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -35,7 +35,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
void __iomem *x = wmi_addr(wil, vring->hwtail);
seq_printf(s, "VRING %s = {\n", name);
- seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa);
+ seq_printf(s, " pa = %pad\n", &vring->pa);
seq_printf(s, " va = 0x%p\n", vring->va);
seq_printf(s, " size = %d\n", vring->size);
seq_printf(s, " swtail = %d\n", vring->swtail);
@@ -473,7 +473,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
- seq_printf(s, " SKB = %p\n", skb);
+ seq_printf(s, " SKB = 0x%p\n", skb);
if (skb) {
skb_get(skb);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5824cd41e4ba..73593aa3cd98 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
}
if (isr)
- wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+ wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
wil->isr_misc = 0;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 95f4efe9ef37..11e6d9d22eae 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -81,7 +81,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
memset(&sta->stats, 0, sizeof(sta->stats));
}
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
{
int cid = -ENOENT;
struct net_device *ndev = wil_to_ndev(wil);
@@ -150,6 +150,15 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
+static void wil_scan_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ clear_bit(wil_status_fwready, &wil->status);
+ wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+ schedule_work(&wil->fw_error_worker);
+}
+
static void wil_fw_error_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work,
@@ -161,12 +170,30 @@ static void wil_fw_error_worker(struct work_struct *work)
if (no_fw_recovery)
return;
+ /* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
+ * has passed since the last recovery attempt
+ */
+ if (time_is_after_jiffies(wil->last_fw_recovery +
+ WIL6210_FW_RECOVERY_TO))
+ wil->recovery_count++;
+ else
+ wil->recovery_count = 1; /* fw was alive for a long time */
+
+ if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
+ wil_err(wil, "too many recovery attempts (%d), giving up\n",
+ wil->recovery_count);
+ return;
+ }
+
+ wil->last_fw_recovery = jiffies;
+
mutex_lock(&wil->mutex);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
- wil_info(wil, "fw error recovery started...\n");
+ wil_info(wil, "fw error recovery started (try %d)...\n",
+ wil->recovery_count);
wil_reset(wil);
/* need to re-allocate Rx ring after reset */
@@ -230,6 +257,7 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->pending_connect_cid = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+ setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
@@ -249,10 +277,12 @@ int wil_priv_init(struct wil6210_priv *wil)
return -EAGAIN;
}
+ wil->last_fw_recovery = jiffies;
+
return 0;
}
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
{
del_timer_sync(&wil->connect_timer);
_wil6210_disconnect(wil, bssid);
@@ -260,6 +290,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
void wil_priv_deinit(struct wil6210_priv *wil)
{
+ del_timer_sync(&wil->scan_timer);
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
mutex_lock(&wil->mutex);
@@ -363,8 +394,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
wil_err(wil, "Firmware not ready\n");
return -ETIME;
} else {
- wil_dbg_misc(wil, "FW ready after %d ms\n",
- jiffies_to_msecs(to-left));
+ wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+ jiffies_to_msecs(to-left), wil->hw_version);
}
return 0;
}
@@ -391,6 +422,7 @@ int wil_reset(struct wil6210_priv *wil)
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
+ del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
@@ -520,6 +552,7 @@ static int __wil_down(struct wil6210_priv *wil)
napi_disable(&wil->napi_tx);
if (wil->scan_request) {
+ del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
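The recovery-throttling logic above is a classic rate limiter: attempts landing within WIL6210_FW_RECOVERY_TO of the previous one bump a counter, a firmware that stayed alive for a while resets it to 1, and recovery is abandoned after WIL6210_FW_RECOVERY_RETRIES rapid attempts. A wall-clock sketch of the same policy (the names and 5-second window are illustrative):

#include <stdbool.h>
#include <time.h>

#define RECOVERY_WINDOW_SEC 5
#define RECOVERY_RETRIES    5

static bool recovery_allowed(time_t *last, int *count)
{
	time_t now = time(NULL);

	if (now - *last < RECOVERY_WINDOW_SEC)
		(*count)++;      /* another failure soon after the last one */
	else
		*count = 1;      /* fw was alive for a long time */
	*last = now;
	return *count <= RECOVERY_RETRIES;
}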
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index fdcaeb820e75..106b6dcb773a 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -32,12 +32,26 @@ static int wil_stop(struct net_device *ndev)
return wil_down(wil);
}
+static int wil_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ if (new_mtu < 68 || new_mtu > IEEE80211_MAX_DATA_LEN_DMG)
+ return -EINVAL;
+
+ wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu);
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
static const struct net_device_ops wil_netdev_ops = {
.ndo_open = wil_open,
.ndo_stop = wil_stop,
.ndo_start_xmit = wil_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = wil_change_mtu,
};
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index f1e1bb338d68..1e2e07b9d13d 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto release_irq;
- wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
-
return 0;
release_irq:
@@ -140,7 +138,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_reg;
}
/* rollback to err_iounmap */
- dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+ dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);
wil = wil_if_alloc(dev, csr);
if (IS_ERR(wil)) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index d04629fe053f..747ae1275877 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -49,10 +49,17 @@ static void wil_release_reorder_frames(struct wil6210_priv *wil,
{
int index;
- while (seq_less(r->head_seq_num, hseq)) {
+ /* note: this function is never called with
+ * hseq preceding r->head_seq_num, i.e. it is always true that
+ * !seq_less(hseq, r->head_seq_num),
+ * and thus on loop exit we should have
+ * r->head_seq_num == hseq
+ */
+ while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
index = reorder_index(r, r->head_seq_num);
wil_release_reorder_frame(wil, r, index);
}
+ r->head_seq_num = hseq;
}
static void wil_reorder_release(struct wil6210_priv *wil,
@@ -91,6 +98,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
spin_lock(&r->reorder_lock);
+ /* Due to the race between WMI events reporting BACK (block-ack)
+ * establishment and data Rx, a few packets may be passed up before
+ * the reorder buffer gets allocated. Catch up by pretending the SSN
+ * is what we see in the first Rx packet.
+ */
+ if (r->first_time) {
+ r->first_time = false;
+ if (seq != r->head_seq_num) {
+ wil_err(wil, "Error: 1-st frame with wrong sequence"
+ " %d, should be %d. Fixing...\n", seq,
+ r->head_seq_num);
+ r->head_seq_num = seq;
+ r->ssn = seq;
+ }
+ }
+
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
dev_kfree_skb(skb);
@@ -162,6 +185,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->head_seq_num = ssn;
r->buf_size = size;
r->stored_mpdu_num = 0;
+ r->first_time = true;
return r;
}
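The first_time fix above only works because sequence numbers compare under modulo-4096 arithmetic; seq_less() in this driver follows the same convention mac80211 uses for block-ack reordering. A standalone sketch of that comparison, assuming the usual 12-bit 802.11 sequence space:

#include <stdint.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

/* true iff sq1 precedes sq2 in modulo-4096 order; ambiguous only when
 * the two are exactly half the sequence space apart */
static int seq_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}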
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index c8c547457eb4..0784ef3d4ce2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -64,6 +64,22 @@ static inline int wil_vring_avail_tx(struct vring *vring)
return vring->size - used - 1;
}
+/**
+ * wil_vring_wmark_low - low watermark for available descriptor space
+ */
+static inline int wil_vring_wmark_low(struct vring *vring)
+{
+ return vring->size/8;
+}
+
+/**
+ * wil_vring_wmark_high - high watermark for available descriptor space
+ */
+static inline int wil_vring_wmark_high(struct vring *vring)
+{
+ return vring->size/4;
+}
+
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
struct device *dev = wil_to_dev(wil);
@@ -98,8 +114,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
_d->dma.status = TX_DMA_STATUS_DU;
}
- wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
- vring->va, (unsigned long long)vring->pa, vring->ctx);
+ wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
+ vring->va, &vring->pa, vring->ctx);
return 0;
}
@@ -880,8 +896,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
pa = dma_map_single(dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
- skb->data, (unsigned long long)pa);
+ wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
+ skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
@@ -1007,7 +1023,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
rc = wil_tx_vring(wil, vring, skb);
/* do we still have enough room in the vring? */
- if (wil_vring_avail_tx(vring) < vring->size/8)
+ if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))
netif_tx_stop_all_queues(wil_to_ndev(wil));
switch (rc) {
@@ -1116,7 +1132,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
done++;
}
}
- if (wil_vring_avail_tx(vring) > vring->size/4)
+ if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring))
netif_tx_wake_all_queues(wil_to_ndev(wil));
return done;
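Naming the size/8 and size/4 thresholds makes the hysteresis explicit: the queue is stopped when free descriptors fall below the low watermark and only woken once they climb back above the high one, so a ring hovering around a single threshold cannot flap on every completion. A minimal model of that flow control:

#include <stdbool.h>

struct txq { int size; int avail; bool stopped; };

static void txq_flow_control(struct txq *q)
{
	if (!q->stopped && q->avail < q->size / 8)
		q->stopped = true;        /* below low watermark: stop */
	else if (q->stopped && q->avail > q->size / 4)
		q->stopped = false;       /* above high watermark: wake */
}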
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 2a2dec75f026..e25edc52398f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -35,11 +35,14 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (512)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */
+#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
+#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
+#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
/* Hardware definitions begin */
@@ -301,6 +304,7 @@ struct wil_tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
+ bool first_time; /* is this the first time this buffer is used? */
};
struct wil6210_stats {
@@ -360,6 +364,8 @@ struct wil6210_priv {
u32 fw_version;
u32 hw_version;
u8 n_mids; /* number of additional MIDs as reported by FW */
+ int recovery_count; /* num of FW recovery attempts in a short time */
+ unsigned long last_fw_recovery; /* jiffies of last fw recovery */
/* profile */
u32 monitor_flags;
u32 secure_pcp; /* create secure PCP? */
@@ -381,6 +387,7 @@ struct wil6210_priv {
struct work_struct disconnect_worker;
struct work_struct fw_error_worker; /* for FW error recovery */
struct timer_list connect_timer;
+ struct timer_list scan_timer; /* detect scan timeout */
int pending_connect_cid;
struct list_head pending_wmi_ev;
/*
@@ -507,7 +514,7 @@ void wil_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
int wmi_pcp_stop(struct wil6210_priv *wil);
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
int wil_rx_init(struct wil6210_priv *wil);
void wil_rx_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2ba56eef0c45..6cc0e182cc70 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
might_sleep();
if (!test_bit(wil_status_fwready, &wil->status)) {
- wil_err(wil, "FW not ready\n");
+ wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
}
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
- wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
- evt->mac, wil->n_mids);
+ wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ evt->mac, wil->n_mids);
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
int len)
{
- wil_dbg_wmi(wil, "WMI: FW ready\n");
+ wil_dbg_wmi(wil, "WMI: got FW ready event\n");
set_bit(wil_status_fwready, &wil->status);
/* reuse wmi_ready for the firmware ready indication */
@@ -348,9 +348,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
{
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
- bool aborted = (data->status != 0);
+ bool aborted = (data->status != WMI_SCAN_SUCCESS);
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, aborted);
wil->scan_request = NULL;
} else {
@@ -658,21 +659,27 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
u8 *cmd;
void __iomem *src;
ulong flags;
+ unsigned n;
if (!test_bit(wil_status_reset_done, &wil->status)) {
wil_err(wil, "Reset not completed\n");
return;
}
- for (;;) {
+ for (n = 0;; n++) {
u16 len;
r->head = ioread32(wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
- if (r->tail == r->head)
+ if (r->tail == r->head) {
+ if (n == 0)
+ wil_dbg_wmi(wil, "No events?\n");
return;
+ }
- /* read cmd from tail */
+ wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
+ r->head, r->tail);
+ /* read cmd descriptor from tail */
wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
sizeof(struct wil6210_mbox_ring_desc));
if (d_tail.sync == 0) {
@@ -680,13 +687,18 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
return;
}
+ /* read cmd header from descriptor */
if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
wil_err(wil, "Mbox evt at 0x%08x?\n",
le32_to_cpu(d_tail.addr));
return;
}
-
len = le16_to_cpu(hdr.len);
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+
+ /* read cmd buffer from descriptor */
src = wmi_buffer(wil, d_tail.addr) +
sizeof(struct wil6210_mbox_hdr);
evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
@@ -702,9 +714,6 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
iowrite32(0, wil->csr + HOSTADDR(r->tail) +
offsetof(struct wil6210_mbox_ring_desc, sync));
/* indicate */
- wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
- le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
- hdr.flags);
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
@@ -734,6 +743,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
wil_dbg_wmi(wil, "queue_work -> %d\n", q);
}
}
+ if (n > 1)
+ wil_dbg_wmi(wil, "%s -> %d events processed\n", __func__, n);
}
int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
@@ -802,6 +813,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
+ .pcp_max_assoc_sta = WIL6210_MAX_CID,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
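The reworked wmi_recv_cmd() loop above polls a head/tail mailbox: the device advances head as it posts events, the driver consumes descriptors from tail until the two meet, and the new counter n merely distinguishes a spurious interrupt (n == 0) from a batch of events. A toy model of that convention (field names are illustrative):

/* toy head/tail ring, consumer side */
struct mbox_ring { unsigned head, tail, size; };

static int mbox_pop(struct mbox_ring *r)
{
	if (r->tail == r->head)
		return -1;                     /* empty: no events */
	/* ...read and dispatch the descriptor at r->tail here... */
	r->tail = (r->tail + 1) % r->size;
	return 0;
}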
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 50b8528394f4..17334c852866 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -28,7 +28,7 @@
#define __WILOCITY_WMI_H__
/* General */
-
+#define WILOCITY_MAX_ASSOC_STA (8)
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
__le16 disconnect_reason;
} __packed;
-/*
- * WMI_RECONNECT_CMDID
- */
-struct wmi_reconnect_cmd {
- u8 channel; /* hint */
- u8 reserved;
- u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
-} __packed;
-
/*
* WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
WMI_LONG_SCAN = 0,
WMI_SHORT_SCAN = 1,
WMI_PBC_SCAN = 2,
+ WMI_ACTIVE_SCAN = 3,
+ WMI_DIRECT_SCAN = 4,
};
struct wmi_start_scan_cmd {
- u8 reserved[8];
-
+ u8 direct_scan_mac_addr[6];
+ u8 reserved[2];
__le32 home_dwell_time; /* Max duration in the home channel(ms) */
__le32 force_scan_interval; /* Time interval between scans (ms)*/
u8 scan_type; /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
+
/*
* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
__le16 frag_num;
__le64 ss_mask;
u8 network_type;
- u8 reserved;
+ u8 pcp_max_assoc_sta;
u8 disable_sec_offload;
u8 disable_sec;
} __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
struct wmi_port_allocate_cmd {
u8 mac[WMI_MAC_LEN];
u8 port_role;
- u8 midid;
+ u8 mid;
} __packed;
/*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
WMI_DISCOVERY_MODE_OFFLOAD = 1,
+ WMI_DISCOVERY_MODE_PEER2PEER = 2,
};
struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
*/
struct wmi_pcp_start_cmd {
__le16 bcon_interval;
- u8 reserved0[10];
+ u8 pcp_max_assoc_sta;
+ u8 reserved0[9];
u8 network_type;
u8 channel;
u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
struct wmi_disconnect_event {
__le16 protocol_reason_status; /* reason code, see 802.11 spec. */
u8 bssid[WMI_MAC_LEN]; /* set if known */
- u8 disconnect_reason; /* see wmi_disconnect_reason_e */
- u8 assoc_resp_len;
- u8 assoc_info[0];
+ u8 disconnect_reason; /* see wmi_disconnect_reason */
+ u8 assoc_resp_len; /* not in use */
+ u8 assoc_info[0]; /* not in use */
} __packed;
/*
* WMI_SCAN_COMPLETE_EVENTID
*/
+enum scan_status {
+ WMI_SCAN_SUCCESS = 0,
+ WMI_SCAN_FAILED = 1,
+ WMI_SCAN_ABORTED = 2,
+ WMI_SCAN_REJECTED = 3,
+};
+
struct wmi_scan_complete_event {
- __le32 status;
+ __le32 status; /* scan_status */
} __packed;
/*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
u8 channel; /* From Radio MNGR */
} __packed;
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+ u8 payload[0];
+} __packed;
+
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];
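The payload[0] members above are the pre-C99 zero-length-array idiom for a trailing variable-sized buffer: the event is allocated with sizeof(header) plus the payload length. A userspace sketch using the C99 flexible-array spelling (struct and helper names are made up):

#include <stdlib.h>
#include <string.h>

struct evt {
	unsigned short len;
	unsigned char payload[];   /* C99 flexible array member */
};

static struct evt *evt_dup(const void *data, unsigned short len)
{
	struct evt *e = malloc(sizeof(*e) + len);

	if (e) {
		e->len = len;
		memcpy(e->payload, data, len);
	}
	return e;
}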
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 088d544ec63f..40fd9b7b1426 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,7 +1,8 @@
config B43
tristate "Broadcom 43xx wireless support (mac80211 stack)"
- depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
- select SSB
+ depends on (BCMA_POSSIBLE || SSB_POSSIBLE) && MAC80211 && HAS_DMA
+ select BCMA if B43_BCMA
+ select SSB if B43_SSB
select FW_LOADER
---help---
b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -27,14 +28,33 @@ config B43
If unsure, say M.
config B43_BCMA
- bool "Support for BCMA bus"
- depends on B43 && (BCMA = y || BCMA = B43)
- default y
+ bool
config B43_SSB
bool
- depends on B43 && (SSB = y || SSB = B43)
- default y
+
+choice
+ prompt "Supported bus types"
+ depends on B43
+ default B43_BUSES_BCMA_AND_SSB
+
+config B43_BUSES_BCMA_AND_SSB
+ bool "BCMA and SSB"
+ depends on BCMA_POSSIBLE && SSB_POSSIBLE
+ select B43_BCMA
+ select B43_SSB
+
+config B43_BUSES_BCMA
+ bool "BCMA only"
+ depends on BCMA_POSSIBLE
+ select B43_BCMA
+
+config B43_BUSES_SSB
+ bool "SSB only"
+ depends on SSB_POSSIBLE
+ select B43_SSB
+
+endchoice
# Auto-select SSB PCI-HOST support, if possible
config B43_PCI_AUTOSELECT
@@ -53,7 +73,7 @@ config B43_PCICORE_AUTOSELECT
config B43_PCMCIA
bool "Broadcom 43xx PCMCIA device support"
- depends on B43 && SSB_PCMCIAHOST_POSSIBLE
+ depends on B43 && B43_SSB && SSB_PCMCIAHOST_POSSIBLE
select SSB_PCMCIAHOST
---help---
Broadcom 43xx PCMCIA device support.
@@ -73,7 +93,7 @@ config B43_PCMCIA
config B43_SDIO
bool "Broadcom 43xx SDIO device support"
- depends on B43 && SSB_SDIOHOST_POSSIBLE
+ depends on B43 && B43_SSB && SSB_SDIOHOST_POSSIBLE
select SSB_SDIOHOST
---help---
Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -98,7 +118,7 @@ config B43_BCMA_PIO
config B43_PIO
bool
- depends on B43
+ depends on B43 && B43_SSB
select SSB_BLOCKIO
default y
@@ -116,7 +136,7 @@ config B43_PHY_N
config B43_PHY_LP
bool "Support for low-power (LP-PHY) devices"
- depends on B43
+ depends on B43 && B43_SSB
default y
---help---
Support for the LP-PHY.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 54376fddfaf9..4113b6934764 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -915,10 +915,6 @@ struct b43_wl {
char rng_name[30 + 1];
#endif /* CONFIG_B43_HWRNG */
- /* List of all wireless devices on this chip */
- struct list_head devlist;
- u8 nr_devs;
-
bool radiotap_enabled;
bool radio_enabled;
diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h
index 184c95659279..f3205c6988bc 100644
--- a/drivers/net/wireless/b43/bus.h
+++ b/drivers/net/wireless/b43/bus.h
@@ -5,7 +5,9 @@ enum b43_bus_type {
#ifdef CONFIG_B43_BCMA
B43_BUS_BCMA,
#endif
+#ifdef CONFIG_B43_SSB
B43_BUS_SSB,
+#endif
};
struct b43_bus_dev {
@@ -52,13 +54,21 @@ struct b43_bus_dev {
static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
{
+#ifdef CONFIG_B43_SSB
return (dev->bus_type == B43_BUS_SSB &&
dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
+#else
+ return false;
+#endif
}
static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
{
+#ifdef CONFIG_B43_SSB
return (dev->bus_type == B43_BUS_SSB &&
dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
+#else
+ return false;
+#endif
}
struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 69fc3d65531a..0d6a0bb1f876 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -182,7 +182,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
#define b43_g_ratetable (__b43_ratetable + 0)
#define b43_g_ratetable_size 12
-#define CHAN4G(_channel, _freq, _flags) { \
+#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
@@ -191,23 +191,31 @@ static struct ieee80211_rate __b43_ratetable[] = {
.max_power = 30, \
}
static struct ieee80211_channel b43_2ghz_chantable[] = {
- CHAN4G(1, 2412, 0),
- CHAN4G(2, 2417, 0),
- CHAN4G(3, 2422, 0),
- CHAN4G(4, 2427, 0),
- CHAN4G(5, 2432, 0),
- CHAN4G(6, 2437, 0),
- CHAN4G(7, 2442, 0),
- CHAN4G(8, 2447, 0),
- CHAN4G(9, 2452, 0),
- CHAN4G(10, 2457, 0),
- CHAN4G(11, 2462, 0),
- CHAN4G(12, 2467, 0),
- CHAN4G(13, 2472, 0),
- CHAN4G(14, 2484, 0),
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
};
-#undef CHAN4G
+#undef CHAN2G
+#define CHAN4G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 4000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
#define CHAN5G(_channel, _flags) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
@@ -217,6 +225,18 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
.max_power = 30, \
}
static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
+ CHAN4G(184, 0), CHAN4G(186, 0),
+ CHAN4G(188, 0), CHAN4G(190, 0),
+ CHAN4G(192, 0), CHAN4G(194, 0),
+ CHAN4G(196, 0), CHAN4G(198, 0),
+ CHAN4G(200, 0), CHAN4G(202, 0),
+ CHAN4G(204, 0), CHAN4G(206, 0),
+ CHAN4G(208, 0), CHAN4G(210, 0),
+ CHAN4G(212, 0), CHAN4G(214, 0),
+ CHAN4G(216, 0), CHAN4G(218, 0),
+ CHAN4G(220, 0), CHAN4G(222, 0),
+ CHAN4G(224, 0), CHAN4G(226, 0),
+ CHAN4G(228, 0),
CHAN5G(32, 0), CHAN5G(34, 0),
CHAN5G(36, 0), CHAN5G(38, 0),
CHAN5G(40, 0), CHAN5G(42, 0),
@@ -260,18 +280,7 @@ static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
CHAN5G(170, 0), CHAN5G(172, 0),
CHAN5G(174, 0), CHAN5G(176, 0),
CHAN5G(178, 0), CHAN5G(180, 0),
- CHAN5G(182, 0), CHAN5G(184, 0),
- CHAN5G(186, 0), CHAN5G(188, 0),
- CHAN5G(190, 0), CHAN5G(192, 0),
- CHAN5G(194, 0), CHAN5G(196, 0),
- CHAN5G(198, 0), CHAN5G(200, 0),
- CHAN5G(202, 0), CHAN5G(204, 0),
- CHAN5G(206, 0), CHAN5G(208, 0),
- CHAN5G(210, 0), CHAN5G(212, 0),
- CHAN5G(214, 0), CHAN5G(216, 0),
- CHAN5G(218, 0), CHAN5G(220, 0),
- CHAN5G(222, 0), CHAN5G(224, 0),
- CHAN5G(226, 0), CHAN5G(228, 0),
+ CHAN5G(182, 0),
};
static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
@@ -295,6 +304,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
CHAN5G(208, 0), CHAN5G(212, 0),
CHAN5G(216, 0),
};
+#undef CHAN4G
#undef CHAN5G
static struct ieee80211_supported_band b43_band_5GHz_nphy = {
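A quick sanity check of the two macros: CHAN4G places channel 184 at 4000 + 5*184 = 4920 MHz (the 4.9 GHz channels the N-PHY table gains above), while CHAN5G places channel 36 at 5000 + 5*36 = 5180 MHz:

#include <stdio.h>

int main(void)
{
	printf("ch 184 -> %d MHz\n", 4000 + 5 * 184); /* 4920 */
	printf("ch 36  -> %d MHz\n", 5000 + 5 * 36);  /* 5180 */
	return 0;
}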
@@ -1175,18 +1185,7 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
udelay(2);
- /* Take PHY out of reset */
- flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
- flags &= ~B43_BCMA_IOCTL_PHY_RESET;
- flags |= BCMA_IOCTL_FGC;
- bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
- udelay(1);
-
- /* Do not force clock anymore */
- flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
- flags &= ~BCMA_IOCTL_FGC;
- bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
- udelay(1);
+ b43_phy_take_out_of_reset(dev);
}
static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
@@ -1195,18 +1194,22 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
B43_BCMA_CLKCTLST_PHY_PLL_REQ;
u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
B43_BCMA_CLKCTLST_PHY_PLL_ST;
+ u32 flags;
+
+ flags = B43_BCMA_IOCTL_PHY_CLKEN;
+ if (gmode)
+ flags |= B43_BCMA_IOCTL_GMODE;
+ b43_device_enable(dev, flags);
- b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
b43_bcma_phy_reset(dev);
bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
}
#endif
+#ifdef CONFIG_B43_SSB
static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
{
- struct ssb_device *sdev = dev->dev->sdev;
- u32 tmslow;
u32 flags = 0;
if (gmode)
@@ -1218,18 +1221,9 @@ static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
b43_device_enable(dev, flags);
msleep(2); /* Wait for the PLL to turn on. */
- /* Now take the PHY out of Reset again */
- tmslow = ssb_read32(sdev, SSB_TMSLOW);
- tmslow |= SSB_TMSLOW_FGC;
- tmslow &= ~B43_TMSLOW_PHYRESET;
- ssb_write32(sdev, SSB_TMSLOW, tmslow);
- ssb_read32(sdev, SSB_TMSLOW); /* flush */
- msleep(1);
- tmslow &= ~SSB_TMSLOW_FGC;
- ssb_write32(sdev, SSB_TMSLOW, tmslow);
- ssb_read32(sdev, SSB_TMSLOW); /* flush */
- msleep(1);
+ b43_phy_take_out_of_reset(dev);
}
+#endif
void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
{
@@ -2704,32 +2698,37 @@ static int b43_upload_initvals(struct b43_wldev *dev)
struct b43_firmware *fw = &dev->fw;
const struct b43_iv *ivals;
size_t count;
- int err;
hdr = (const struct b43_fw_header *)(fw->initvals.data->data);
ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len);
count = be32_to_cpu(hdr->size);
- err = b43_write_initvals(dev, ivals, count,
+ return b43_write_initvals(dev, ivals, count,
fw->initvals.data->size - hdr_len);
- if (err)
- goto out;
- if (fw->initvals_band.data) {
- hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
- ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
- count = be32_to_cpu(hdr->size);
- err = b43_write_initvals(dev, ivals, count,
- fw->initvals_band.data->size - hdr_len);
- if (err)
- goto out;
- }
-out:
+}
- return err;
+static int b43_upload_initvals_band(struct b43_wldev *dev)
+{
+ const size_t hdr_len = sizeof(struct b43_fw_header);
+ const struct b43_fw_header *hdr;
+ struct b43_firmware *fw = &dev->fw;
+ const struct b43_iv *ivals;
+ size_t count;
+
+ if (!fw->initvals_band.data)
+ return 0;
+
+ hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
+ ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
+ count = be32_to_cpu(hdr->size);
+ return b43_write_initvals(dev, ivals, count,
+ fw->initvals_band.data->size - hdr_len);
}
/* Initialize the GPIOs
* http://bcm-specs.sipsolutions.net/GPIO
*/
+
+#ifdef CONFIG_B43_SSB
static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
{
struct ssb_bus *bus = dev->dev->sdev->bus;
@@ -2740,10 +2739,13 @@ static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
return bus->chipco.dev;
#endif
}
+#endif
static int b43_gpio_init(struct b43_wldev *dev)
{
+#ifdef CONFIG_B43_SSB
struct ssb_device *gpiodev;
+#endif
u32 mask, set;
b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
@@ -2802,7 +2804,9 @@ static int b43_gpio_init(struct b43_wldev *dev)
/* Turn off all GPIO stuff. Call this on module unload, for example. */
static void b43_gpio_cleanup(struct b43_wldev *dev)
{
+#ifdef CONFIG_B43_SSB
struct ssb_device *gpiodev;
+#endif
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
@@ -3086,6 +3090,10 @@ static int b43_chip_init(struct b43_wldev *dev)
if (err)
goto err_gpio_clean;
+ err = b43_upload_initvals_band(dev);
+ if (err)
+ goto err_gpio_clean;
+
/* Turn the Analog on and initialize the PHY. */
phy->ops->switch_analog(dev, 1);
err = b43_phy_init(dev);
@@ -3685,37 +3693,6 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static void b43_put_phy_into_reset(struct b43_wldev *dev)
-{
- u32 tmp;
-
- switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
- case B43_BUS_BCMA:
- b43err(dev->wl,
- "Putting PHY into reset not supported on BCMA\n");
- break;
-#endif
-#ifdef CONFIG_B43_SSB
- case B43_BUS_SSB:
- tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
- tmp &= ~B43_TMSLOW_GMODE;
- tmp |= B43_TMSLOW_PHYRESET;
- tmp |= SSB_TMSLOW_FGC;
- ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
- msleep(1);
-
- tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
- tmp &= ~SSB_TMSLOW_FGC;
- tmp |= B43_TMSLOW_PHYRESET;
- ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
- msleep(1);
-
- break;
-#endif
- }
-}
-
static const char *band_to_string(enum ieee80211_band band)
{
switch (band) {
@@ -3731,94 +3708,75 @@ static const char *band_to_string(enum ieee80211_band band)
}
/* Expects wl->mutex locked */
-static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
+static int b43_switch_band(struct b43_wldev *dev,
+ struct ieee80211_channel *chan)
{
- struct b43_wldev *up_dev = NULL;
- struct b43_wldev *down_dev;
- struct b43_wldev *d;
- int err;
- bool uninitialized_var(gmode);
- int prev_status;
+ struct b43_phy *phy = &dev->phy;
+ bool gmode;
+ u32 tmp;
- /* Find a device and PHY which supports the band. */
- list_for_each_entry(d, &wl->devlist, list) {
- switch (chan->band) {
- case IEEE80211_BAND_5GHZ:
- if (d->phy.supports_5ghz) {
- up_dev = d;
- gmode = false;
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (d->phy.supports_2ghz) {
- up_dev = d;
- gmode = true;
- }
- break;
- default:
- B43_WARN_ON(1);
- return -EINVAL;
- }
- if (up_dev)
- break;
+ switch (chan->band) {
+ case IEEE80211_BAND_5GHZ:
+ gmode = false;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ gmode = true;
+ break;
+ default:
+ B43_WARN_ON(1);
+ return -EINVAL;
}
- if (!up_dev) {
- b43err(wl, "Could not find a device for %s-GHz band operation\n",
+
+ if (!((gmode && phy->supports_2ghz) ||
+ (!gmode && phy->supports_5ghz))) {
+ b43err(dev->wl, "This device doesn't support %s-GHz band\n",
band_to_string(chan->band));
return -ENODEV;
}
- if ((up_dev == wl->current_dev) &&
- (!!wl->current_dev->phy.gmode == !!gmode)) {
+
+ if (!!phy->gmode == !!gmode) {
/* This device is already running. */
return 0;
}
- b43dbg(wl, "Switching to %s-GHz band\n",
+
+ b43dbg(dev->wl, "Switching to %s GHz band\n",
band_to_string(chan->band));
- down_dev = wl->current_dev;
- prev_status = b43_status(down_dev);
- /* Shutdown the currently running core. */
- if (prev_status >= B43_STAT_STARTED)
- down_dev = b43_wireless_core_stop(down_dev);
- if (prev_status >= B43_STAT_INITIALIZED)
- b43_wireless_core_exit(down_dev);
+ /* Some newer devices don't need the radio disabled for band switching */
+ if (!(phy->type == B43_PHYTYPE_N && phy->rev >= 3))
+ b43_software_rfkill(dev, true);
- if (down_dev != up_dev) {
- /* We switch to a different core, so we put PHY into
- * RESET on the old core. */
- b43_put_phy_into_reset(down_dev);
+ phy->gmode = gmode;
+ b43_phy_put_into_reset(dev);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ if (gmode)
+ tmp |= B43_BCMA_IOCTL_GMODE;
+ else
+ tmp &= ~B43_BCMA_IOCTL_GMODE;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ if (gmode)
+ tmp |= B43_TMSLOW_GMODE;
+ else
+ tmp &= ~B43_TMSLOW_GMODE;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ break;
+#endif
}
+ b43_phy_take_out_of_reset(dev);
- /* Now start the new core. */
- up_dev->phy.gmode = gmode;
- if (prev_status >= B43_STAT_INITIALIZED) {
- err = b43_wireless_core_init(up_dev);
- if (err) {
- b43err(wl, "Fatal: Could not initialize device for "
- "selected %s-GHz band\n",
- band_to_string(chan->band));
- goto init_failure;
- }
- }
- if (prev_status >= B43_STAT_STARTED) {
- err = b43_wireless_core_start(up_dev);
- if (err) {
- b43err(wl, "Fatal: Could not start device for "
- "selected %s-GHz band\n",
- band_to_string(chan->band));
- b43_wireless_core_exit(up_dev);
- goto init_failure;
- }
- }
- B43_WARN_ON(b43_status(up_dev) != prev_status);
+ b43_upload_initvals_band(dev);
- wl->current_dev = up_dev;
+ b43_phy_init(dev);
return 0;
-init_failure:
- /* Whoops, failed to init the new core. No core is operating now. */
- wl->current_dev = NULL;
- return err;
}
/* Write the short and long frame retry limit values. */
@@ -3851,8 +3809,10 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
dev = wl->current_dev;
+ b43_mac_suspend(dev);
+
/* Switch the band (if necessary). This might change the active core. */
- err = b43_switch_band(wl, conf->chandef.chan);
+ err = b43_switch_band(dev, conf->chandef.chan);
if (err)
goto out_unlock_mutex;
@@ -3871,8 +3831,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
else
phy->is_40mhz = false;
- b43_mac_suspend(dev);
-
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
conf->long_frame_max_tx_count);
@@ -4582,8 +4540,12 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
struct ssb_bus *bus;
u32 tmp;
+#ifdef CONFIG_B43_SSB
if (dev->dev->bus_type != B43_BUS_SSB)
return;
+#else
+ return;
+#endif
bus = dev->dev->sdev->bus;
@@ -4738,7 +4700,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
}
if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)
hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */
-#ifdef CONFIG_SSB_DRIVER_PCICORE
+#if defined(CONFIG_B43_SSB) && defined(CONFIG_SSB_DRIVER_PCICORE)
if (dev->dev->bus_type == B43_BUS_SSB &&
dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
dev->dev->sdev->bus->pcicore.dev->id.revision <= 10)
@@ -5129,10 +5091,82 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
b43_phy_free(dev);
}
+static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
+ bool *have_5ghz_phy)
+{
+ u16 dev_id = 0;
+
+#ifdef CONFIG_B43_BCMA
+ if (dev->dev->bus_type == B43_BUS_BCMA &&
+ dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI)
+ dev_id = dev->dev->bdev->bus->host_pci->device;
+#endif
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
+ dev_id = dev->dev->sdev->bus->host_pci->device;
+#endif
+ /* Override with SPROM value if available */
+ if (dev->dev->bus_sprom->dev_id)
+ dev_id = dev->dev->bus_sprom->dev_id;
+
+ /* Note: below IDs can be "virtual" (not maching e.g. real PCI ID) */
+ switch (dev_id) {
+ case 0x4324: /* BCM4306 */
+ case 0x4312: /* BCM4311 */
+ case 0x4319: /* BCM4318 */
+ case 0x4328: /* BCM4321 */
+ case 0x432b: /* BCM4322 */
+ case 0x4350: /* BCM43222 */
+ case 0x4353: /* BCM43224 */
+ case 0x0576: /* BCM43224 */
+ case 0x435f: /* BCM6362 */
+ case 0x4331: /* BCM4331 */
+ case 0x4359: /* BCM43228 */
+ case 0x43a0: /* BCM4360 */
+ case 0x43b1: /* BCM4352 */
+ /* Dual band devices */
+ *have_2ghz_phy = true;
+ *have_5ghz_phy = true;
+ return;
+ case 0x4321: /* BCM4306 */
+ case 0x4313: /* BCM4311 */
+ case 0x431a: /* BCM4318 */
+ case 0x432a: /* BCM4321 */
+ case 0x432d: /* BCM4322 */
+ case 0x4352: /* BCM43222 */
+ case 0x4333: /* BCM4331 */
+ case 0x43a2: /* BCM4360 */
+ case 0x43b3: /* BCM4352 */
+ /* 5 GHz only devices */
+ *have_2ghz_phy = false;
+ *have_5ghz_phy = true;
+ return;
+ }
+
+ /* As a fallback, try to guess using PHY type */
+ switch (dev->phy.type) {
+ case B43_PHYTYPE_A:
+ *have_2ghz_phy = false;
+ *have_5ghz_phy = true;
+ return;
+ case B43_PHYTYPE_G:
+ case B43_PHYTYPE_N:
+ case B43_PHYTYPE_LP:
+ case B43_PHYTYPE_HT:
+ case B43_PHYTYPE_LCN:
+ *have_2ghz_phy = true;
+ *have_5ghz_phy = false;
+ return;
+ }
+
+ B43_WARN_ON(1);
+}
+
static int b43_wireless_core_attach(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
- struct pci_dev *pdev = NULL;
+ struct b43_phy *phy = &dev->phy;
int err;
u32 tmp;
bool have_2ghz_phy = false, have_5ghz_phy = false;
@@ -5144,19 +5178,15 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
* that in core_init(), too.
*/
-#ifdef CONFIG_B43_SSB
- if (dev->dev->bus_type == B43_BUS_SSB &&
- dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
- pdev = dev->dev->sdev->bus->host_pci;
-#endif
-
err = b43_bus_powerup(dev, 0);
if (err) {
b43err(wl, "Bus powerup failed\n");
goto out;
}
- /* Get the PHY type. */
+ phy->do_full_init = true;
+
+	/* Take a first guess at the supported bands needed for the first init */
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
@@ -5178,51 +5208,32 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
+ /* Get the PHY type. */
err = b43_phy_versioning(dev);
if (err)
goto err_powerdown;
- /* Check if this device supports multiband. */
- if (!pdev ||
- (pdev->device != 0x4312 &&
- pdev->device != 0x4319 && pdev->device != 0x4324)) {
- /* No multiband support. */
- have_2ghz_phy = false;
+
+ /* Get real info about supported bands */
+ b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
+
+ /* We don't support 5 GHz on some PHYs yet */
+ switch (dev->phy.type) {
+ case B43_PHYTYPE_A:
+ case B43_PHYTYPE_G:
+ case B43_PHYTYPE_N:
+ case B43_PHYTYPE_LP:
+ case B43_PHYTYPE_HT:
+ b43warn(wl, "5 GHz band is unsupported on this PHY\n");
have_5ghz_phy = false;
- switch (dev->phy.type) {
- case B43_PHYTYPE_A:
- have_5ghz_phy = true;
- break;
- case B43_PHYTYPE_LP: //FIXME not always!
-#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
- have_5ghz_phy = 1;
-#endif
- case B43_PHYTYPE_G:
- case B43_PHYTYPE_N:
- case B43_PHYTYPE_HT:
- case B43_PHYTYPE_LCN:
- have_2ghz_phy = true;
- break;
- default:
- B43_WARN_ON(1);
- }
}
- if (dev->phy.type == B43_PHYTYPE_A) {
- /* FIXME */
- b43err(wl, "IEEE 802.11a devices are unsupported\n");
+
+ if (!have_2ghz_phy && !have_5ghz_phy) {
+ b43err(wl, "b43 can't support any band on this device\n");
err = -EOPNOTSUPP;
goto err_powerdown;
}
- if (1 /* disable A-PHY */) {
- /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
- if (dev->phy.type != B43_PHYTYPE_N &&
- dev->phy.type != B43_PHYTYPE_LP) {
- have_2ghz_phy = true;
- have_5ghz_phy = false;
- }
- }
err = b43_phy_allocate(dev);
if (err)
@@ -5270,7 +5281,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
- wl->nr_devs--;
b43_bus_set_wldev(dev, NULL);
kfree(wldev);
}
@@ -5295,8 +5305,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
if (err)
goto err_kfree_wldev;
- list_add(&wldev->list, &wl->devlist);
- wl->nr_devs++;
b43_bus_set_wldev(dev, wldev);
b43_debugfs_add_device(wldev);
@@ -5314,6 +5322,7 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
(pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \
(pdev->subsystem_device == _subdevice) )
+#ifdef CONFIG_B43_SSB
static void b43_sprom_fixup(struct ssb_bus *bus)
{
struct pci_dev *pdev;
@@ -5345,6 +5354,7 @@ static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
ssb_set_devtypedata(dev->sdev, NULL);
ieee80211_free_hw(hw);
}
+#endif
static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
{
@@ -5386,7 +5396,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl->hw = hw;
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
- INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5495,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
struct b43_bus_dev *dev;
struct b43_wl *wl;
int err;
- int first = 0;
dev = b43_bus_dev_ssb_init(sdev);
if (!dev)
return -ENOMEM;
wl = ssb_get_devtypedata(sdev);
- if (!wl) {
- /* Probing the first core. Must setup common struct b43_wl */
- first = 1;
- b43_sprom_fixup(sdev->bus);
- wl = b43_wireless_init(dev);
- if (IS_ERR(wl)) {
- err = PTR_ERR(wl);
- goto out;
- }
- ssb_set_devtypedata(sdev, wl);
- B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+ if (wl) {
+ b43err(NULL, "Dual-core devices are not supported\n");
+ err = -ENOTSUPP;
+ goto err_ssb_kfree_dev;
+ }
+
+ b43_sprom_fixup(sdev->bus);
+
+ wl = b43_wireless_init(dev);
+ if (IS_ERR(wl)) {
+ err = PTR_ERR(wl);
+ goto err_ssb_kfree_dev;
}
+ ssb_set_devtypedata(sdev, wl);
+ B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+
err = b43_one_core_attach(dev, wl);
if (err)
- goto err_wireless_exit;
+ goto err_ssb_wireless_exit;
/* setup and start work to load firmware */
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
- out:
return err;
- err_wireless_exit:
- if (first)
- b43_wireless_exit(dev, wl);
+err_ssb_wireless_exit:
+ b43_wireless_exit(dev, wl);
+err_ssb_kfree_dev:
+ kfree(dev);
return err;
}
@@ -5546,13 +5558,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
/* Unregister HW RNG driver */
b43_rng_exit(wl);
- if (list_empty(&wl->devlist)) {
- b43_leds_unregister(wl);
- /* Last core on the chip unregistered.
- * We can destroy common struct b43_wl.
- */
- b43_wireless_exit(dev, wl);
- }
+ b43_leds_unregister(wl);
+ b43_wireless_exit(dev, wl);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index dbaa51890198..08244b3b327e 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -96,12 +96,16 @@ int b43_phy_init(struct b43_wldev *dev)
phy->channel = ops->get_default_chan(dev);
- ops->software_rfkill(dev, false);
+ phy->ops->switch_analog(dev, true);
+ b43_software_rfkill(dev, false);
+
err = ops->init(dev);
if (err) {
b43err(dev->wl, "PHY init failed\n");
goto err_block_rf;
}
+ phy->do_full_init = false;
+
/* Make sure to switch hardware and firmware (SHM) to
* the default channel. */
err = b43_switch_channel(dev, ops->get_default_chan(dev));
@@ -113,10 +117,11 @@ int b43_phy_init(struct b43_wldev *dev)
return 0;
err_phy_exit:
+ phy->do_full_init = true;
if (ops->exit)
ops->exit(dev);
err_block_rf:
- ops->software_rfkill(dev, true);
+ b43_software_rfkill(dev, true);
return err;
}
@@ -125,7 +130,8 @@ void b43_phy_exit(struct b43_wldev *dev)
{
const struct b43_phy_operations *ops = dev->phy.ops;
- ops->software_rfkill(dev, true);
+ b43_software_rfkill(dev, true);
+ dev->phy.do_full_init = true;
if (ops->exit)
ops->exit(dev);
}
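The do_full_init flag introduced by this patch follows a simple lifecycle,
condensed here as a sketch (not literal driver code):

    phy->do_full_init = true;          /* at attach, in phy_exit, on failed init */
    err = ops->init(dev);
    if (!err)
        phy->do_full_init = false;     /* cleared only after a successful init */

The radio init paths below (2056/2057) use it to run the expensive
rcal/rccal calibration only on a full (re)initialization.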
@@ -312,6 +318,90 @@ void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
}
}
+void b43_phy_put_into_reset(struct b43_wldev *dev)
+{
+ u32 tmp;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~B43_BCMA_IOCTL_GMODE;
+ tmp |= B43_BCMA_IOCTL_PHY_RESET;
+ tmp |= BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~B43_TMSLOW_GMODE;
+ tmp |= B43_TMSLOW_PHYRESET;
+ tmp |= SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ usleep_range(1000, 2000);
+
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ usleep_range(1000, 2000);
+
+ break;
+#endif
+ }
+}
+
+void b43_phy_take_out_of_reset(struct b43_wldev *dev)
+{
+ u32 tmp;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ /* Unset reset bit (with forcing clock) */
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~B43_BCMA_IOCTL_PHY_RESET;
+ tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
+ tmp |= BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+
+ /* Do not force clock anymore */
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~BCMA_IOCTL_FGC;
+ tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* Unset reset bit (with forcing clock) */
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~B43_TMSLOW_PHYRESET;
+ tmp &= ~B43_TMSLOW_PHYCLKEN;
+ tmp |= SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+ usleep_range(1000, 2000);
+
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~SSB_TMSLOW_FGC;
+ tmp |= B43_TMSLOW_PHYCLKEN;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+ usleep_range(1000, 2000);
+ break;
+#endif
+ }
+}
+
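A minimal caller sketch for the new helper pair (the reconfiguration step in
the middle is illustrative only):

    b43_phy_put_into_reset(dev);
    /* ... reprogram the core while the PHY is held in reset ... */
    b43_phy_take_out_of_reset(dev);

Note the asymmetric timing above: the BCMA path settles with udelay(1),
while the SSB path uses usleep_range(1000, 2000) plus read-back flushes.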
int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
{
struct b43_phy *phy = &(dev->phy);
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index f1b999349876..4ad6240d9ff4 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -231,9 +231,12 @@ struct b43_phy {
/* HT info */
bool is_40mhz;
- /* GMODE bit enabled? */
+	/* Is the GMODE (2 GHz mode) bit enabled? */
bool gmode;
+	/* After a power reset, a full init has to be performed */
+ bool do_full_init;
+
/* Analog Type */
u8 analog;
/* B43_PHYTYPE_ */
@@ -390,6 +393,9 @@ void b43_phy_lock(struct b43_wldev *dev);
*/
void b43_phy_unlock(struct b43_wldev *dev);
+void b43_phy_put_into_reset(struct b43_wldev *dev);
+void b43_phy_take_out_of_reset(struct b43_wldev *dev);
+
/**
* b43_switch_channel - Switch to another channel
*/
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 12f467b8d564..8f5c14bc10e6 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
static void b43_phy_initb6(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_radio_write16(dev, 0x50, 0x20);
}
if (phy->radio_rev <= 2) {
- b43_radio_write16(dev, 0x7C, 0x20);
+ b43_radio_write16(dev, 0x50, 0x20);
b43_radio_write16(dev, 0x5A, 0x70);
b43_radio_write16(dev, 0x5B, 0x7B);
b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_phy_write(dev, 0x2A, 0x8AC0);
b43_phy_write(dev, 0x0038, 0x0668);
b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
- if (phy->radio_rev <= 5) {
+ if (phy->radio_rev == 4 || phy->radio_rev == 5)
b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
- }
if (phy->radio_rev <= 2)
b43_radio_write16(dev, 0x005D, 0x000D);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 24ccbe96e0c8..86569f6a8705 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
}
}
+static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
+ enum n_intc_override intc_override,
+ u16 value, u8 core_sel)
+{
+ u16 reg, tmp, tmp2, val;
+ int core;
+
+ for (core = 0; core < 2; core++) {
+ if ((core_sel == 1 && core != 0) ||
+ (core_sel == 2 && core != 1))
+ continue;
+
+ reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+
+ switch (intc_override) {
+ case N_INTC_OVERRIDE_OFF:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case N_INTC_OVERRIDE_TRSW:
+ b43_phy_maskset(dev, reg, ~0xC0, value << 6);
+ b43_phy_set(dev, reg, 0x400);
+
+ b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
+ b43_phy_set(dev, 0x2ff, 0x2000);
+ b43_phy_set(dev, 0x2ff, 0x0001);
+ break;
+ case N_INTC_OVERRIDE_PA:
+ tmp = 0x0030;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ val = value << 5;
+ else
+ val = value << 4;
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_set(dev, reg, 0x1000);
+ break;
+ case N_INTC_OVERRIDE_EXT_LNA_PU:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ tmp2 = 0x0004;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ tmp2 = 0x0001;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_mask(dev, reg, ~tmp2);
+ break;
+ case N_INTC_OVERRIDE_EXT_LNA_GAIN:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ tmp2 = 0x0008;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ tmp2 = 0x0002;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_mask(dev, reg, ~tmp2);
+ break;
+ }
+ }
+}
+
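For reference, core_sel in the loop above selects cores as follows: 1 acts
on core 0 only, 2 on core 1 only, and any other value (e.g. the 7 passed by
the RSSI calibration below) on both. Hypothetical calls:

    b43_nphy_rf_ctl_intc_override_rev7(dev, N_INTC_OVERRIDE_TRSW, 1, 0); /* both cores */
    b43_nphy_rf_ctl_intc_override_rev7(dev, N_INTC_OVERRIDE_PA, 0, 2);   /* core 1 only */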
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
u8 i, j;
u16 reg, tmp, val;
+ if (dev->phy.rev >= 7) {
+ b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
+ core);
+ return;
+ }
+
B43_WARN_ON(dev->phy.rev < 3);
for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
static const u16 clip[] = { 0xFFFF, 0xFFFF };
if (nphy->deaf_count++ == 0) {
nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
- b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_classifier(dev, 0x7,
+ B43_NPHY_CLASSCTL_WAITEDEN);
b43_nphy_read_clip_detection(dev, nphy->clip_state);
b43_nphy_write_clip_detection(dev, clip);
}
@@ -627,13 +700,11 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
- if (dev->phy.n->init_por) {
+ if (dev->phy.do_full_init) {
b43_radio_2057_rcal(dev);
b43_radio_2057_rccal(dev);
}
b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
-
- dev->phy.n->init_por = false;
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
@@ -734,9 +805,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
u16 bias, cbias;
u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
+ bool is_pkg_fab_smic;
B43_WARN_ON(dev->phy.rev < 3);
+ is_pkg_fab_smic =
+ ((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
+ dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
+
b43_chantab_radio_2056_upload(dev, e);
b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
@@ -744,7 +822,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
- if (dev->dev->chip_id == 0x4716) {
+ if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
} else {
@@ -752,6 +831,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
}
}
+ if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
+ }
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +853,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADG_IDAC, 0xcc);
- if (dev->dev->chip_id == 0x4716) {
+ if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
bias = 0x40;
cbias = 0x45;
pag_boost = 0x5;
@@ -776,6 +863,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
} else {
bias = 0x25;
cbias = 0x20;
+ if (is_pkg_fab_smic) {
+ bias = 0x2a;
+ cbias = 0x38;
+ }
pag_boost = 0x4;
pgag_boost = 0x03;
mixg_boost = 0x65;
@@ -844,6 +935,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
mixa_boost = 0xF;
}
+ cbias = is_pkg_fab_smic ? 0x35 : 0x30;
+
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
@@ -862,11 +955,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADA_CASCBIAS, 0x03);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+ offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+ offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+ offset | B2056_TX_INTPAA_CASCBIAS, cbias);
}
}
@@ -933,7 +1026,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
- if (dev->phy.n->init_por)
+ if (dev->phy.do_full_init)
b43_radio_2056_rcal(dev);
}
@@ -946,8 +1039,6 @@ static void b43_radio_init2056(struct b43_wldev *dev)
b43_radio_init2056_pre(dev);
b2056_upload_inittabs(dev, 0, 0);
b43_radio_init2056_post(dev);
-
- dev->phy.n->init_por = false;
}
/**************************************************
@@ -1164,23 +1255,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
u16 seq_mode;
u32 tmp;
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
+ b43_nphy_stay_in_carrier_search(dev, true);
if ((nphy->bb_mult_save & 0x80000000) == 0) {
tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
}
+ /* TODO: add modify_bbmult argument */
if (!dev->phy.is_40mhz)
tmp = 0x6464;
else
tmp = 0x4747;
b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-
b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
if (loops != 0xFFFF)
@@ -1213,6 +1301,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43err(dev->wl, "run samples timeout\n");
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+
+ b43_nphy_stay_in_carrier_search(dev, false);
}
/**************************************************
@@ -1588,8 +1678,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
struct b43_phy_n *nphy = dev->phy.n;
u16 saved_regs_phy_rfctl[2];
- u16 saved_regs_phy[13];
- u16 regs_to_store[] = {
+ u16 saved_regs_phy[22];
+ u16 regs_to_store_rev3[] = {
B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1688,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
};
+ u16 regs_to_store_rev7[] = {
+ B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+ B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+ B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+ 0x342, 0x343, 0x346, 0x347,
+ 0x2ff,
+ B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+ B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ 0x340, 0x341, 0x344, 0x345,
+ B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+ };
+ u16 *regs_to_store;
+ int regs_amount;
u16 class;
@@ -1617,6 +1721,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u8 rx_core_state;
int core, i, j, vcm;
+ if (dev->phy.rev >= 7) {
+ regs_to_store = regs_to_store_rev7;
+ regs_amount = ARRAY_SIZE(regs_to_store_rev7);
+ } else {
+ regs_to_store = regs_to_store_rev3;
+ regs_amount = ARRAY_SIZE(regs_to_store_rev3);
+ }
+ BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
+
class = b43_nphy_classifier(dev, 0, 0);
b43_nphy_classifier(dev, 7, 4);
b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1737,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ for (i = 0; i < regs_amount; i++)
saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
- b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+
+ if (dev->phy.rev >= 7) {
+ /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ } else {
+ }
} else {
- b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+ } else {
+ b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+ }
}
rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1774,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
/* Grab RSSI results for every possible VCM */
for (vcm = 0; vcm < 8; vcm++) {
- b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
- vcm << 2);
+ if (dev->phy.rev >= 7)
+			; /* TODO */
+ else
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+ 0xE3, vcm << 2);
b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
}
@@ -1682,8 +1805,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
/* Select the best VCM */
- b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
- vcm_final << 2);
+ if (dev->phy.rev >= 7)
+			; /* TODO */
+ else
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+ 0xE3, vcm_final << 2);
for (i = 0; i < 4; i++) {
if (core != i / 2)
@@ -1736,9 +1862,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
- b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ for (i = 0; i < regs_amount; i++)
b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
@@ -2494,8 +2620,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* TX to RX */
- u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
/* RX to TX */
u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
@@ -2503,6 +2629,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+ u16 vmids[5][4] = {
+ { 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
+ { 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
+ { 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
+ { 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
+ { 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
+ };
+ u16 gains[5][4] = {
+ { 0x02, 0x02, 0x02, 0x00, }, /* 0 */
+ { 0x02, 0x02, 0x02, 0x02, }, /* 1 */
+ { 0x02, 0x02, 0x02, 0x04, }, /* 2 */
+ { 0x02, 0x02, 0x02, 0x00, }, /* 3 */
+ { 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
+ };
+ u16 *vmid, *gain;
+
+ u8 pdet_range;
u16 tmp16;
u32 tmp32;
@@ -2561,7 +2704,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
- /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ pdet_range = sprom->fem.ghz2.pdet_range;
+ else
+ pdet_range = sprom->fem.ghz5.pdet_range;
+ vmid = vmids[min_t(u16, pdet_range, 4)];
+ gain = gains[min_t(u16, pdet_range, 4)];
+ switch (pdet_range) {
+ case 3:
+ if (!(dev->phy.rev >= 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ break;
+ /* FALL THROUGH */
+ case 0:
+ case 1:
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ case 2:
+ if (dev->phy.rev >= 6) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ vmid[3] = 0x94;
+ else
+ vmid[3] = 0x8e;
+ gain[3] = 3;
+ } else if (dev->phy.rev == 5) {
+ vmid[3] = 0x84;
+ gain[3] = 2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ case 4:
+ case 5:
+ if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+ if (pdet_range == 4) {
+ vmid[3] = 0x8e;
+ tmp16 = 0x96;
+ gain[3] = 0x2;
+ } else {
+ vmid[3] = 0x89;
+ tmp16 = 0x89;
+ gain[3] = 0;
+ }
+ } else {
+ if (pdet_range == 4) {
+ vmid[3] = 0x89;
+ tmp16 = 0x8b;
+ gain[3] = 0x2;
+ } else {
+ vmid[3] = 0x74;
+ tmp16 = 0x70;
+ gain[3] = 0;
+ }
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ vmid[3] = tmp16;
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ }
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@@ -2600,7 +2807,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
/* Dropped probably-always-true condition */
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
- b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
+ b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3418,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
u8 idx, delta;
u8 i, stf_mode;
+	/* The adj_pwr_tbl array corresponds to the hardware table. It consists
+	 * of 21 groups, each containing 4 entries.
+	 *
+	 * The first group holds the entries for CCK modulation.
+	 * Each remaining group has one entry per modulation (SISO, CDD, STBC, SDM).
+	 *
+	 * Group 0 is for CCK
+	 * Groups 1..4 use BPSK (one group per coding rate)
+	 * Groups 5..8 use QPSK (one group per coding rate)
+	 * Groups 9..12 use 16-QAM (one group per coding rate)
+	 * Groups 13..16 use 64-QAM (one group per coding rate)
+	 * Groups 17..20 are unknown
+	 */
+
for (i = 0; i < 4; i++)
nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
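The layout described above maps (group, entry) pairs onto a flat 84-entry
array, matching the 84-value bulk uploads re-enabled further below; a
hypothetical index helper:

    static inline unsigned int adj_pwr_idx(unsigned int group, unsigned int entry)
    {
        return group * 4 + entry;  /* 21 groups * 4 entries = 84 values */
    }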
@@ -3409,10 +3630,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
}
b43_nphy_tx_prepare_adjusted_power_table(dev);
- /*
b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
- */
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5343,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
if (phy->rev >= 3 && phy->rev <= 6)
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
b43_nphy_tx_lp_fbw(dev);
if (phy->rev >= 3)
b43_nphy_spur_workaround(dev);
@@ -5338,7 +5557,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
nphy->spur_avoid = (phy->rev >= 3) ?
B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
- nphy->init_por = true;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -5379,8 +5597,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
}
-
- nphy->init_por = true;
}
static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -5441,8 +5657,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
/* Register 1 is a 32-bit register. */
B43_WARN_ON(reg == 1);
- /* N-PHY needs 0x100 for read access */
- reg |= 0x100;
+
+ if (dev->phy.rev >= 7)
+ reg |= 0x200; /* Radio 0x2057 */
+ else
+ reg |= 0x100;
b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
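The offset selection above condenses to a single expression (editorial
restatement, not driver code):

    reg |= (dev->phy.rev >= 7) ? 0x200 /* radio 0x2057 */
                               : 0x100 /* radio 0x2055/0x2056 */;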
@@ -5488,10 +5707,12 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
} else {
if (dev->phy.rev >= 7) {
- b43_radio_2057_init(dev);
+ if (!dev->phy.radio_on)
+ b43_radio_2057_init(dev);
b43_switch_channel(dev, dev->phy.channel);
} else if (dev->phy.rev >= 3) {
- b43_radio_init2056(dev);
+ if (!dev->phy.radio_on)
+ b43_radio_init2056(dev);
b43_switch_channel(dev, dev->phy.channel);
} else {
b43_radio_init2055(dev);
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 9a5b6bc27d24..ecfbf66dbc3b 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -931,7 +931,6 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
- bool init_por;
bool gain_boost;
bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index b4fd9345d673..2ce25607c60d 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
unsigned int rx_length;
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-#define INITTABSPTS(prefix) \
- .syn = prefix##_syn, \
- .syn_length = ARRAY_SIZE(prefix##_syn), \
- .tx = prefix##_tx, \
- .tx_length = ARRAY_SIZE(prefix##_tx), \
- .rx = prefix##_rx, \
- .rx_length = ARRAY_SIZE(prefix##_rx)
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+};
-static const struct b2056_inittabs_pts b2056_inittabs[] = {
- [3] = { INITTABSPTS(b2056_inittab_rev3) },
- [4] = { INITTABSPTS(b2056_inittab_rev4) },
- [5] = { INITTABSPTS(b2056_inittab_rev5) },
- [6] = { INITTABSPTS(b2056_inittab_rev6) },
- [7] = { INITTABSPTS(b2056_inittab_rev7) },
- [8] = { INITTABSPTS(b2056_inittab_rev8) },
- [9] = { INITTABSPTS(b2056_inittab_rev7) },
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
};
+#define INITTABSPTS(prefix) \
+ static const struct b2056_inittabs_pts prefix = { \
+ .syn = prefix##_syn, \
+ .syn_length = ARRAY_SIZE(prefix##_syn), \
+ .tx = prefix##_tx, \
+ .tx_length = ARRAY_SIZE(prefix##_tx), \
+ .rx = prefix##_rx, \
+ .rx_length = ARRAY_SIZE(prefix##_rx), \
+ }
+
+INITTABSPTS(b2056_inittab_phy_rev3);
+INITTABSPTS(b2056_inittab_phy_rev4);
+INITTABSPTS(b2056_inittab_radio_rev5);
+INITTABSPTS(b2056_inittab_radio_rev6);
+INITTABSPTS(b2056_inittab_radio_rev7_9);
+INITTABSPTS(b2056_inittab_radio_rev8);
+INITTABSPTS(b2056_inittab_radio_rev11);
+
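For reference, each INITTABSPTS() invocation above now defines the pts
struct itself; e.g. INITTABSPTS(b2056_inittab_phy_rev3) expands to:

    static const struct b2056_inittabs_pts b2056_inittab_phy_rev3 = {
        .syn = b2056_inittab_phy_rev3_syn,
        .syn_length = ARRAY_SIZE(b2056_inittab_phy_rev3_syn),
        .tx = b2056_inittab_phy_rev3_tx,
        .tx_length = ARRAY_SIZE(b2056_inittab_phy_rev3_tx),
        .rx = b2056_inittab_phy_rev3_rx,
        .rx_length = ARRAY_SIZE(b2056_inittab_phy_rev3_rx),
    };

Compared with the removed variant, which emitted initializer fragments for a
revision-indexed array, the named structs presumably let the
revision-to-table selection be done in code rather than by array index.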
#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
.phy_regs.phy_bw6 = r5
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -9011,6 +9056,1154 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
},
};
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
+ {
+ .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ {
+ .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ {
+ .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ {
+ .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ {
+ .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ {
+ .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ {
+ .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ {
+ .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ {
+ .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ {
+ .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ {
+ .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ {
+ .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ {
+ .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ {
+ .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ {
+ .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ {
+ .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ {
+ .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ {
+ .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ {
+ .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ {
+ .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ {
+ .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ {
+ .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ {
+ .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ {
+ .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ {
+ .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ {
+ .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ {
+ .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ {
+ .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ {
+ .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ {
+ .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ {
+ .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ {
+ .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ {
+ .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ {
+ .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ {
+ .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ {
+ .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ {
+ .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ {
+ .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ {
+ .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ {
+ .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ {
+ .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ {
+ .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ {
+ .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ {
+ .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ {
+ .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ {
+ .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ {
+ .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ {
+ .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ {
+ .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ {
+ .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ {
+ .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ {
+ .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ {
+ .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ {
+ .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ {
+ .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ {
+ .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ {
+ .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ {
+ .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ {
+ .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ {
+ .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ {
+ .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ {
+ .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ {
+ .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ {
+ .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ {
+ .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ {
+ .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ {
+ .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ {
+ .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ {
+ .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ {
+ .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ {
+ .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ {
+ .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ {
+ .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ {
+ .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ {
+ .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ {
+ .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ {
+ .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ {
+ .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ {
+ .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ {
+ .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ {
+ .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ {
+ .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ {
+ .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ {
+ .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ {
+ .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ {
+ .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ {
+ .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ {
+ .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ {
+ .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ {
+ .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ {
+ .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ {
+ .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ {
+ .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ {
+ .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ {
+ .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ {
+ .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ {
+ .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ {
+ .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ {
+ .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ {
+ .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ {
+ .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ {
+ .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ {
+ .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ {
+ .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ {
+ .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ {
+ .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ {
+ .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ {
+ .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ {
+ .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ {
+ .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ {
+ .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ {
+ .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ {
+ .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ {
+ .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ {
+ .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ {
+ .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ {
+ .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ {
+ .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ {
+ .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ {
+ .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ {
+ .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ {
+ .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ {
+ .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ {
+ .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b2056_inittabs_pts
+*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ switch (dev->phy.rev) {
+ case 3:
+ return &b2056_inittab_phy_rev3;
+ case 4:
+ return &b2056_inittab_phy_rev4;
+ default:
+ switch (phy->radio_rev) {
+ case 5:
+ return &b2056_inittab_radio_rev5;
+ case 6:
+ return &b2056_inittab_radio_rev6;
+ case 7:
+ case 9:
+ return &b2056_inittab_radio_rev7_9;
+ case 8:
+ return &b2056_inittab_radio_rev8;
+ case 11:
+ return &b2056_inittab_radio_rev11;
+ }
+ }
+
+ return NULL;
+}
+
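
The dispatch above keys on the PHY revision first: revs 3 and 4 get PHY-specific init tables, while later revisions are told apart by the revision of the attached 2056 radio. Both upload helpers below guard against the NULL return. A sketch of two hypothetical combinations:

	/* PHY rev 6 with radio rev 8: the inner switch yields
	 * &b2056_inittab_radio_rev8. PHY rev 6 with radio rev 10 matches
	 * no case, so the function returns NULL and the callers below
	 * trip B43_WARN_ON(1). */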
static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
bool ignore_uploadflag, u16 routing,
const struct b2056_inittab_entry *e,
@@ -9037,11 +10230,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
{
const struct b2056_inittabs_pts *pts;
- if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ pts = b43_nphy_get_inittabs_rev3(dev);
+ if (!pts) {
B43_WARN_ON(1);
return;
}
- pts = &b2056_inittabs[dev->phy.rev];
b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +10253,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b2056_inittabs_pts *pts;
const struct b2056_inittab_entry *e;
- if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ pts = b43_nphy_get_inittabs_rev3(dev);
+ if (!pts) {
B43_WARN_ON(1);
return;
}
- pts = &b2056_inittabs[dev->phy.rev];
+
e = &pts->syn[B2056_SYN_PLL_CP2];
b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +10267,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
+ struct b43_phy *phy = &dev->phy;
const struct b43_nphy_channeltab_entry_rev3 *e;
unsigned int length, i;
- switch (dev->phy.rev) {
+ switch (phy->rev) {
case 3:
- e = b43_nphy_channeltab_rev3;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+ e = b43_nphy_channeltab_phy_rev3;
+ length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
break;
case 4:
- e = b43_nphy_channeltab_rev4;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
- break;
- case 5:
- e = b43_nphy_channeltab_rev5;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
- break;
- case 6:
- e = b43_nphy_channeltab_rev6;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
- break;
- case 7:
- case 9:
- e = b43_nphy_channeltab_rev7_9;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
- break;
- case 8:
- e = b43_nphy_channeltab_rev8;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+ e = b43_nphy_channeltab_phy_rev4;
+ length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
break;
default:
- B43_WARN_ON(1);
- return NULL;
+ switch (phy->radio_rev) {
+ case 5:
+ e = b43_nphy_channeltab_radio_rev5;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+ break;
+ case 6:
+ e = b43_nphy_channeltab_radio_rev6;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+ break;
+ case 7:
+ case 9:
+ e = b43_nphy_channeltab_radio_rev7_9;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+ break;
+ case 8:
+ e = b43_nphy_channeltab_radio_rev8;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+ break;
+ case 11:
+ e = b43_nphy_channeltab_radio_rev11;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+ break;
+ default:
+ B43_WARN_ON(1);
+ return NULL;
+ }
}
for (i = 0; i < length; i++, e++) {
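
The hunk ends before the loop body, which this patch leaves untouched; a minimal sketch of how such a table lookup presumably completes, matching on the .freq field used throughout the table above:

	/* Hypothetical loop body -- the real one lies outside this hunk. */
	for (i = 0; i < length; i++, e++) {
		if (e->freq == freq)
			return e;	/* exact channel-frequency match */
	}
	return NULL;			/* frequency not in the table */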
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 94c755fdda14..4047c05e3807 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
};
-static const u32 b43_ntab_noisevar0_r3[] = {
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-};
-
-static const u32 b43_ntab_noisevar1_r3[] = {
+static const u32 b43_ntab_noisevar_r3[] = {
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3109,31 +3042,32 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
antswlut = sprom->fem.ghz2.antswlut;
/* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
- ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
- ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
- ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
- ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
- ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
- ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
- ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
- ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
- ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
- ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
- ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
- ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
- ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
- ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
- ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
- ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
- ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
- ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
- ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
- ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
- ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
- ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+ if (dev->phy.do_full_init) {
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+ ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+ ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+ ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+ ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+ ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
+ ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+ ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+ ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+ ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+ ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+ ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+ ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+ ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+ ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
+ ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
+ ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+ ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+ ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+ ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+ ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+ ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+ }
/* Volatile tables */
if (antswlut < ARRAY_SIZE(b43_ntab_antswctl_r3))
@@ -3146,20 +3080,22 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
{
/* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
- ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
- ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
- ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
- ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
- ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
- ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
- ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
- ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
- ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
- ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
- ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ if (dev->phy.do_full_init) {
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+ ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+ ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+ ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+ ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+ ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+ ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+ ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+ ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+ ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+ ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+ ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+ ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+ ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ }
/* Volatile tables */
ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
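
Both init paths now follow the same gating pattern; in sketch form (the meaning of do_full_init is assumed from its name, as its definition is not part of this diff):

	if (dev->phy.do_full_init) {
		/* Large static tables: uploaded only on a full PHY
		 * (re)initialization, on the assumption that they keep
		 * their contents across lighter operations. */
	}
	/* Volatile tables: rewritten unconditionally on every init. */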
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9ff33adcff89..3a58aee4c4cf 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
-#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
+#define B43_NTAB_NOISEVAR_R3 B43_NTAB32(16, 0) /* noise variance */
#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
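
Since the two old noise-variance tables held identical contents (the removed table is the same repeating 0x02110211/0x0000014d pair as the surviving one) at consecutive offsets, a single table now covers the region with one upload; schematically, assuming ntab_upload() writes ARRAY_SIZE(data) entries starting at the given offset:

	/* Before: two uploads of identical halves. */
	ntab_upload(dev, B43_NTAB32(16, 0),   b43_ntab_noisevar0_r3);
	ntab_upload(dev, B43_NTAB32(16, 128), b43_ntab_noisevar1_r3);
	/* After: one upload of the combined table at offset 0. */
	ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);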
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 9b1a038be08b..c218c08fb2f5 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
{
- b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
+ b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
}
static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 31adb8cf0291..6e6ef3fc2247 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -408,7 +408,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
mac_ctl |= B43_TXH_MAC_HWSEQ;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
mac_ctl |= B43_TXH_MAC_STMSDU;
- if (phy->type == B43_PHYTYPE_A)
+ if (!phy->gmode)
mac_ctl |= B43_TXH_MAC_5GHZ;
/* Overwrite rates[0].count to make the retry calculation
@@ -811,9 +811,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
- /* chanid is the radio channel cookie value as used
- * to tune the radio. */
- status.freq = chanid + 2400;
+ /* Somewhere between firmware versions 478.104 and 508.1084, G-PHY
+ * firmware was modified to report the channel the same way as N-PHY
+ * and others do, hence the version check below.
+ */
+ if (dev->fw.rev >= 508)
+ status.freq = ieee80211_channel_to_frequency(chanid, status.band);
+ else
+ status.freq = chanid + 2400;
break;
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
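
A worked illustration of the two firmware conventions handled above (example numbers; the old behaviour is implied by the replaced chanid + 2400 line):

	/* Pre-508 G-PHY firmware: chanid carries freq - 2400, so channel 1
	 * (2412 MHz) arrives as chanid = 12 and 12 + 2400 = 2412.
	 * Firmware >= 508: chanid carries the channel number, and
	 * ieee80211_channel_to_frequency(1, IEEE80211_BAND_2GHZ) = 2412. */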
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1d2ceac3a221..98e67c18f276 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -33,7 +33,7 @@ brcmfmac-objs += \
bcdc.o \
dhd_common.o \
dhd_linux.o \
- nvram.o \
+ firmware.o \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 939d6b132922..16f9ab2568a8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -186,7 +186,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
bool success);
/* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index c4535616064e..7735328fff21 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -63,7 +63,6 @@ struct brcmf_bus_dcmd {
*/
struct brcmf_bus_ops {
int (*preinit)(struct device *dev);
- int (*init)(struct device *dev);
void (*stop)(struct device *dev);
int (*txdata)(struct device *dev, struct sk_buff *skb);
int (*txctl)(struct device *dev, unsigned char *msg, uint len);
@@ -99,6 +98,7 @@ struct brcmf_bus {
unsigned long tx_realloc;
u32 chip;
u32 chiprev;
+ bool always_use_fws_queue;
struct brcmf_bus_ops *ops;
};
@@ -113,11 +113,6 @@ static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
return bus->ops->preinit(bus->dev);
}
-static inline int brcmf_bus_init(struct brcmf_bus *bus)
-{
- return bus->ops->init(bus->dev);
-}
-
static inline void brcmf_bus_stop(struct brcmf_bus *bus)
{
bus->ops->stop(bus->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6a8983a1fb9c..ed3e32ce8c23 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,6 +32,9 @@
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST 8
+
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
@@ -246,6 +249,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
+ struct brcmf_join_pref_params join_pref_params[2];
char *ptr;
s32 err;
@@ -298,6 +302,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
goto done;
}
+ /* Set up join_pref to select the join target by RSSI (with a boost on 5 GHz) */
+ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ join_pref_params[0].len = 2;
+ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+ join_pref_params[0].band = WLC_BAND_5G;
+ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[1].len = 2;
+ join_pref_params[1].rssi_gain = 0;
+ join_pref_params[1].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+
/* Setup event_msgs, enable E_IF */
err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
BRCMF_EVENTING_MASK_LEN);
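
To make the join_pref setup above concrete (assumed firmware semantics; the 8 dB figure is BRCMF_JOIN_PREF_RSSI_BOOST from this patch):

	/* With an 8 dB RSSI boost on the 5 GHz band, a 5 GHz AP heard at
	 * -70 dBm competes as if at -62 dBm, so the firmware would pick
	 * it over a 2.4 GHz AP heard at -65 dBm when joining. */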
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 7d28cd385092..09dd8c13d844 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -190,7 +190,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- struct ethhdr *eh;
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
@@ -236,6 +236,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
+ if (eh->h_proto == htons(ETH_P_PAE))
+ atomic_inc(&ifp->pend_8021x_cnt);
+
ret = brcmf_fws_process_skb(ifp, skb);
done:
@@ -538,31 +541,26 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
brcmf_netif_rx(ifp, skb);
}
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
bool success)
{
struct brcmf_if *ifp;
struct ethhdr *eh;
- u8 ifidx;
u16 type;
- int res;
-
- res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
ifp = drvr->iflist[ifidx];
if (!ifp)
goto done;
- if (res == 0) {
- eh = (struct ethhdr *)(txp->data);
- type = ntohs(eh->h_proto);
+ eh = (struct ethhdr *)(txp->data);
+ type = ntohs(eh->h_proto);
- if (type == ETH_P_PAE) {
- atomic_dec(&ifp->pend_8021x_cnt);
- if (waitqueue_active(&ifp->pend_8021x_wait))
- wake_up(&ifp->pend_8021x_wait);
- }
+ if (type == ETH_P_PAE) {
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
}
+
if (!success)
ifp->stats.tx_errors++;
done:
@@ -573,13 +571,17 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ u8 ifidx;
/* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(drvr->fws)) {
if (!success)
brcmf_fws_bustxfail(drvr->fws, txp);
} else {
- brcmf_txfinalize(drvr, txp, success);
+ if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
+ brcmu_pkt_buf_free_skb(txp);
+ else
+ brcmf_txfinalize(drvr, txp, ifidx, success);
}
}
@@ -914,13 +916,6 @@ int brcmf_bus_start(struct device *dev)
brcmf_dbg(TRACE, "\n");
- /* Bring up the bus */
- ret = brcmf_bus_init(bus_if);
- if (ret != 0) {
- brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
- return ret;
- }
-
/* add primary networking interface */
ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
if (IS_ERR(ifp))
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 13c89a0c4ba7..8fa0dbbbda72 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -42,7 +42,7 @@
#include <soc.h>
#include "sdio_host.h"
#include "chip.h"
-#include "nvram.h"
+#include "firmware.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
@@ -632,43 +632,28 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
};
-
-static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
- enum brcmf_firmware_type type)
+static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
+ enum brcmf_firmware_type type)
{
- const struct firmware *fw;
- const char *name;
- int err, i;
+ int i;
for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
- if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
- brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+ if (brcmf_fwname_data[i].chipid == ci->chip &&
+ brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
switch (type) {
case BRCMF_FIRMWARE_BIN:
- name = brcmf_fwname_data[i].bin;
- break;
+ return brcmf_fwname_data[i].bin;
case BRCMF_FIRMWARE_NVRAM:
- name = brcmf_fwname_data[i].nv;
- break;
+ return brcmf_fwname_data[i].nv;
default:
brcmf_err("invalid firmware type (%d)\n", type);
return NULL;
}
- goto found;
}
}
brcmf_err("Unknown chipid %d [%d]\n",
- bus->ci->chip, bus->ci->chiprev);
+ ci->chip, ci->chiprev);
return NULL;
-
-found:
- err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
- if ((err) || (!fw)) {
- brcmf_err("fail to request firmware %s (%d)\n", name, err);
- return NULL;
- }
-
- return fw;
}
static void pkt_align(struct sk_buff *p, int len, int align)
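
The matching logic above treats revmsk as a bitmask over chip revisions (semantics inferred from the BIT(ci->chiprev) test):

	/* An entry with revmsk 0xFFFFFFFF matches any revision below 32:
	 * 0xFFFFFFFF & BIT(chiprev) is non-zero for every such chiprev,
	 * so the BCM4354 entry above covers all of its revisions. */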
@@ -3278,20 +3263,13 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
}
static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
- const struct firmware *nv)
+ void *vars, u32 varsz)
{
- void *vars;
- u32 varsz;
int address;
int err;
brcmf_dbg(TRACE, "Enter\n");
- vars = brcmf_nvram_strip(nv, &varsz);
-
- if (vars == NULL)
- return -EINVAL;
-
address = bus->ci->ramsize - varsz + bus->ci->rambase;
err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
if (err)
@@ -3300,15 +3278,14 @@ static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
err = -EIO;
- brcmf_nvram_free(vars);
-
return err;
}
-static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+ const struct firmware *fw,
+ void *nvram, u32 nvlen)
{
int bcmerror = -EFAULT;
- const struct firmware *fw;
u32 rstvec;
sdio_claim_host(bus->sdiodev->func[1]);
@@ -3317,12 +3294,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
/* Keep arm in reset */
brcmf_chip_enter_download(bus->ci);
- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
- if (fw == NULL) {
- bcmerror = -ENOENT;
- goto err;
- }
-
rstvec = get_unaligned_le32(fw->data);
brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
@@ -3330,17 +3301,12 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
release_firmware(fw);
if (bcmerror) {
brcmf_err("dongle image file download failed\n");
+ brcmf_fw_nvram_free(nvram);
goto err;
}
- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
- if (fw == NULL) {
- bcmerror = -ENOENT;
- goto err;
- }
-
- bcmerror = brcmf_sdio_download_nvram(bus, fw);
- release_firmware(fw);
+ bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
+ brcmf_fw_nvram_free(nvram);
if (bcmerror) {
brcmf_err("dongle nvram file download failed\n");
goto err;
@@ -3490,97 +3456,6 @@ done:
return err;
}
-static int brcmf_sdio_bus_init(struct device *dev)
-{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- struct brcmf_sdio *bus = sdiodev->bus;
- int err, ret = 0;
- u8 saveclk;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* try to download image and nvram to the dongle */
- if (bus_if->state == BRCMF_BUS_DOWN) {
- bus->alp_only = true;
- err = brcmf_sdio_download_firmware(bus);
- if (err)
- return err;
- bus->alp_only = false;
- }
-
- if (!bus->sdiodev->bus_if->drvr)
- return 0;
-
- /* Start the watchdog timer */
- bus->sdcnt.tickcnt = 0;
- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-
- sdio_claim_host(bus->sdiodev->func[1]);
-
- /* Make sure backplane clock is on, needed to generate F2 interrupt */
- brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
- if (bus->clkstate != CLK_AVAIL)
- goto exit;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err) {
- brcmf_err("Failed to force clock for F2: err %d\n", err);
- goto exit;
- }
-
- /* Enable function 2 (frame transfers) */
- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata));
- err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
-
-
- brcmf_dbg(INFO, "enable F2: err=%d\n", err);
-
- /* If F2 successfully enabled, set core and enable interrupts */
- if (!err) {
- /* Set up the interrupt mask and enable interrupts */
- bus->hostintmask = HOSTINTMASK;
- w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask));
-
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
- } else {
- /* Disable F2 again */
- sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
- ret = -ENODEV;
- }
-
- if (brcmf_chip_sr_capable(bus->ci)) {
- brcmf_sdio_sr_init(bus);
- } else {
- /* Restore previous clock setting */
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
- }
-
- if (ret == 0) {
- ret = brcmf_sdiod_intr_register(bus->sdiodev);
- if (ret != 0)
- brcmf_err("intr register failed:%d\n", ret);
- }
-
- /* If we didn't come up, turn off backplane clock */
- if (ret != 0)
- brcmf_sdio_clkctl(bus, CLK_NONE, false);
-
-exit:
- sdio_release_host(bus->sdiodev->func[1]);
-
- return ret;
-}
-
void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4020,13 +3895,114 @@ brcmf_sdio_watchdog(unsigned long data)
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.stop = brcmf_sdio_bus_stop,
.preinit = brcmf_sdio_bus_preinit,
- .init = brcmf_sdio_bus_init,
.txdata = brcmf_sdio_bus_txdata,
.txctl = brcmf_sdio_bus_txctl,
.rxctl = brcmf_sdio_bus_rxctl,
.gettxq = brcmf_sdio_bus_gettxq,
};
+static void brcmf_sdio_firmware_callback(struct device *dev,
+ const struct firmware *code,
+ void *nvram, u32 nvram_len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ int err = 0;
+ u8 saveclk;
+
+ brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+
+ /* try to download image and nvram to the dongle */
+ if (bus_if->state == BRCMF_BUS_DOWN) {
+ bus->alp_only = true;
+ err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
+ if (err)
+ goto fail;
+ bus->alp_only = false;
+ }
+
+ if (!bus_if->drvr)
+ return;
+
+ /* Start the watchdog timer */
+ bus->sdcnt.tickcnt = 0;
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+
+ sdio_claim_host(sdiodev->func[1]);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+ if (bus->clkstate != CLK_AVAIL)
+ goto release;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ brcmf_err("Failed to force clock for F2: err %d\n", err);
+ goto release;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata));
+ err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+
+
+ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (!err) {
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ w_sdreg32(bus, bus->hostintmask,
+ offsetof(struct sdpcmd_regs, hostintmask));
+
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+ } else {
+ /* Disable F2 again */
+ sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+ goto release;
+ }
+
+ if (brcmf_chip_sr_capable(bus->ci)) {
+ brcmf_sdio_sr_init(bus);
+ } else {
+ /* Restore previous clock setting */
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk, &err);
+ }
+
+ if (err == 0) {
+ err = brcmf_sdiod_intr_register(sdiodev);
+ if (err != 0)
+ brcmf_err("intr register failed:%d\n", err);
+ }
+
+ /* If we didn't come up, turn off backplane clock */
+ if (err != 0)
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
+
+ sdio_release_host(sdiodev->func[1]);
+
+ err = brcmf_bus_start(dev);
+ if (err != 0) {
+ brcmf_err("dongle is not responding\n");
+ goto fail;
+ }
+ return;
+
+release:
+ sdio_release_host(sdiodev->func[1]);
+fail:
+ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
+ device_release_driver(dev);
+}
+
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret;
@@ -4110,8 +4086,13 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail;
}
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+
/* Allocate buffers */
if (bus->sdiodev->bus_if->maxctl) {
+ bus->sdiodev->bus_if->maxctl += bus->roundup;
bus->rxblen =
roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
ALIGNMENT) + bus->head_align;
@@ -4139,10 +4120,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->idletime = BRCMF_IDLE_INTERVAL;
bus->idleclock = BRCMF_IDLE_ACTIVE;
- /* Query the F2 block size, set roundup accordingly */
- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
- bus->roundup = min(max_roundup, bus->blocksize);
-
/* SR state */
bus->sleeping = false;
bus->sr_enabled = false;
@@ -4150,10 +4127,14 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
- /* if firmware path present try to download and bring up bus */
- ret = brcmf_bus_start(bus->sdiodev->dev);
+ ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
+ brcmf_sdio_get_fwname(bus->ci,
+ BRCMF_FIRMWARE_BIN),
+ brcmf_sdio_get_fwname(bus->ci,
+ BRCMF_FIRMWARE_NVRAM),
+ brcmf_sdio_firmware_callback);
if (ret != 0) {
- brcmf_err("dongle is not responding\n");
+ brcmf_err("async firmware request failed: %d\n", ret);
goto fail;
}
@@ -4173,9 +4154,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
- if (bus->sdiodev->bus_if->drvr) {
- brcmf_detach(bus->sdiodev->dev);
- }
+ brcmf_detach(bus->sdiodev->dev);
cancel_work_sync(&bus->datawork);
if (bus->brcmf_wq)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
new file mode 100644
index 000000000000..7b7d237c1ddb
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+
+#include "dhd_dbg.h"
+#include "firmware.h"
+
+enum nvram_parser_state {
+ IDLE,
+ KEY,
+ VALUE,
+ COMMENT,
+ END
+};
+
+/**
+ * struct nvram_parser - internal info for parser.
+ *
+ * @state: current parser state.
+ * @fwnv: input buffer being parsed.
+ * @nvram: output buffer with parse result.
+ * @nvram_len: length of parse result.
+ * @line: current line.
+ * @column: current column in line.
+ * @pos: byte offset in input buffer.
+ * @entry: start position of key,value entry.
+ */
+struct nvram_parser {
+ enum nvram_parser_state state;
+ const struct firmware *fwnv;
+ u8 *nvram;
+ u32 nvram_len;
+ u32 line;
+ u32 column;
+ u32 pos;
+ u32 entry;
+};
+
+static bool is_nvram_char(char c)
+{
+ /* comment marker excluded */
+ if (c == '#')
+ return false;
+
+ /* key and value may have any other readable character */
+ return (c > 0x20 && c < 0x7f);
+}
+
+static bool is_whitespace(char c)
+{
+ return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
+{
+ char c;
+
+ c = nvp->fwnv->data[nvp->pos];
+ if (c == '\n')
+ return COMMENT;
+ if (is_whitespace(c))
+ goto proceed;
+ if (c == '#')
+ return COMMENT;
+ if (is_nvram_char(c)) {
+ nvp->entry = nvp->pos;
+ return KEY;
+ }
+ brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
+ nvp->line, nvp->column);
+proceed:
+ nvp->column++;
+ nvp->pos++;
+ return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
+{
+ enum nvram_parser_state st = nvp->state;
+ char c;
+
+ c = nvp->fwnv->data[nvp->pos];
+ if (c == '=') {
+ st = VALUE;
+ } else if (!is_nvram_char(c)) {
+ brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
+ nvp->line, nvp->column);
+ return COMMENT;
+ }
+
+ nvp->column++;
+ nvp->pos++;
+ return st;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_value(struct nvram_parser *nvp)
+{
+ char c;
+ char *skv;
+ char *ekv;
+ u32 cplen;
+
+ c = nvp->fwnv->data[nvp->pos];
+ if (!is_nvram_char(c)) {
+ /* key,value pair complete */
+ ekv = (char *)&nvp->fwnv->data[nvp->pos];
+ skv = (char *)&nvp->fwnv->data[nvp->entry];
+ cplen = ekv - skv;
+ /* copy to output buffer */
+ memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
+ nvp->nvram_len += cplen;
+ nvp->nvram[nvp->nvram_len] = '\0';
+ nvp->nvram_len++;
+ return IDLE;
+ }
+ nvp->pos++;
+ nvp->column++;
+ return VALUE;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_comment(struct nvram_parser *nvp)
+{
+ char *eol, *sol;
+
+ sol = (char *)&nvp->fwnv->data[nvp->pos];
+ eol = strchr(sol, '\n');
+ if (eol == NULL)
+ return END;
+
+ /* consume the rest of the line and move to the next */
+ nvp->line++;
+ nvp->column = 1;
+ nvp->pos += (eol - sol) + 1;
+ return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
+{
+ /* final state */
+ return END;
+}
+
+static enum nvram_parser_state
+(*nv_parser_states[])(struct nvram_parser *nvp) = {
+ brcmf_nvram_handle_idle,
+ brcmf_nvram_handle_key,
+ brcmf_nvram_handle_value,
+ brcmf_nvram_handle_comment,
+ brcmf_nvram_handle_end
+};
+
+static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
+ const struct firmware *nv)
+{
+ memset(nvp, 0, sizeof(*nvp));
+ nvp->fwnv = nv;
+ /* Alloc for extra 0 byte + roundup by 4 + length field */
+ nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ if (!nvp->nvram)
+ return -ENOMEM;
+
+ nvp->line = 1;
+ nvp->column = 1;
+ return 0;
+}
+
+/* brcmf_fw_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
+ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+ * End of buffer is completed with token identifying length of buffer.
+ */
+static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
+{
+ struct nvram_parser nvp;
+ u32 pad;
+ u32 token;
+ __le32 token_le;
+
+ if (brcmf_init_nvram_parser(&nvp, nv) < 0)
+ return NULL;
+
+ while (nvp.pos < nv->size) {
+ nvp.state = nv_parser_states[nvp.state](&nvp);
+ if (nvp.state == END)
+ break;
+ }
+ pad = nvp.nvram_len;
+ *new_length = roundup(nvp.nvram_len + 1, 4);
+ while (pad != *new_length) {
+ nvp.nvram[pad] = 0;
+ pad++;
+ }
+
+ token = *new_length / 4;
+ token = (~token << 16) | (token & 0x0000FFFF);
+ token_le = cpu_to_le32(token);
+
+ memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
+ *new_length += sizeof(token_le);
+
+ return nvp.nvram;
+}
+
+void brcmf_fw_nvram_free(void *nvram)
+{
+ kfree(nvram);
+}
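
The end-of-buffer token built above packs the stripped image size in 32-bit words into the low 16 bits and the bitwise complement of that count into the high 16 bits, letting firmware sanity-check where the NVRAM image ends. A minimal standalone sketch of the same arithmetic, assuming a hypothetical stripped length of 100 bytes:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the token math in brcmf_fw_nvram_strip(); host-side sketch */
    int main(void)
    {
        uint32_t new_length = 100;        /* hypothetical, already 4-byte aligned */
        uint32_t token = new_length / 4;  /* size in words: 25 (0x19) */

        /* low 16 bits: word count; high 16 bits: its complement */
        token = (~token << 16) | (token & 0x0000FFFF);

        printf("token = 0x%08x\n", token);  /* prints 0xffe60019 */
        return 0;
    }
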
+
+struct brcmf_fw {
+ struct device *dev;
+ u16 flags;
+ const struct firmware *code;
+ const char *nvram_name;
+ void (*done)(struct device *dev, const struct firmware *fw,
+ void *nvram_image, u32 nvram_len);
+};
+
+static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
+{
+ struct brcmf_fw *fwctx = ctx;
+ u32 nvram_length = 0;
+ void *nvram = NULL;
+
+ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
+ if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
+
+ if (fw) {
+ nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
+ release_firmware(fw);
+ if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
+ }
+
+ fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+ kfree(fwctx);
+ return;
+
+fail:
+ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
+ if (fwctx->code)
+ release_firmware(fwctx->code);
+ device_release_driver(fwctx->dev);
+ kfree(fwctx);
+}
+
+static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
+{
+ struct brcmf_fw *fwctx = ctx;
+ int ret;
+
+ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
+ if (!fw)
+ goto fail;
+
+ /* only the code was requested, so we are done here */
+ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
+ fwctx->done(fwctx->dev, fw, NULL, 0);
+ kfree(fwctx);
+ return;
+ }
+ fwctx->code = fw;
+ ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_nvram_done);
+
+ if (!ret)
+ return;
+
+ /* when nvram is optional, call the .done() callback here */
+ if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
+ fwctx->done(fwctx->dev, fw, NULL, 0);
+ kfree(fwctx);
+ return;
+ }
+
+ /* failed nvram request */
+ release_firmware(fw);
+fail:
+ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
+ device_release_driver(fwctx->dev);
+ kfree(fwctx);
+}
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len))
+{
+ struct brcmf_fw *fwctx;
+
+ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
+ if (!fw_cb || !code)
+ return -EINVAL;
+
+ if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
+ return -EINVAL;
+
+ fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
+ if (!fwctx)
+ return -ENOMEM;
+
+ fwctx->dev = dev;
+ fwctx->flags = flags;
+ fwctx->done = fw_cb;
+ if (flags & BRCMF_FW_REQUEST_NVRAM)
+ fwctx->nvram_name = nvram;
+
+ return request_firmware_nowait(THIS_MODULE, true, code, dev,
+ GFP_KERNEL, fwctx,
+ brcmf_fw_request_code_done);
+}
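
A sketch of how a bus driver is expected to drive this API, modeled on the SDIO probe path earlier in this patch; my_bus_download(), my_bus_fw_callback() and the firmware paths are hypothetical, while brcmf_fw_get_firmwares(), brcmf_fw_nvram_free() and BRCMF_FW_REQUEST_NVRAM come from the code above:

    /* hypothetical consumer of the async firmware API added above */
    static void my_bus_fw_callback(struct device *dev, const struct firmware *fw,
                                   void *nvram, u32 nvram_len)
    {
        int err;

        /* the callback owns both images: use them, then free them */
        err = my_bus_download(dev, fw->data, fw->size, nvram, nvram_len);
        release_firmware(fw);
        brcmf_fw_nvram_free(nvram);
        if (err)
            device_release_driver(dev);
    }

    static int my_bus_probe(struct device *dev)
    {
        /* a non-zero return means the request was never queued;
         * otherwise the result arrives in my_bus_fw_callback()
         */
        return brcmf_fw_get_firmwares(dev, BRCMF_FW_REQUEST_NVRAM,
                                      "brcm/fw.bin", "brcm/nvram.txt",
                                      my_bus_fw_callback);
    }
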
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index d454580928c9..6431bfd7afff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -13,12 +13,24 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef BRCMFMAC_NVRAM_H
-#define BRCMFMAC_NVRAM_H
+#ifndef BRCMFMAC_FIRMWARE_H
+#define BRCMFMAC_FIRMWARE_H
+#define BRCMF_FW_REQUEST 0x000F
+#define BRCMF_FW_REQUEST_NVRAM 0x0001
+#define BRCMF_FW_REQ_FLAGS 0x00F0
+#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010
-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
-void brcmf_nvram_free(void *nvram);
-
+void brcmf_fw_nvram_free(void *nvram);
+/*
+ * Request firmware(s) asynchronously. When the asynchronous request
+ * fails, it will not use the callback, but call device_release_driver()
+ * instead, which will call the driver's .remove() callback.
+ */
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len));
-#endif /* BRCMFMAC_NVRAM_H */
+#endif /* BRCMFMAC_FIRMWARE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 614e4888504f..2bc68a2137fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -53,6 +53,14 @@
#define BRCMF_OBSS_COEX_OFF 0
#define BRCMF_OBSS_COEX_ON 1
+/* join preference types for join_pref iovar */
+enum brcmf_join_pref_types {
+ BRCMF_JOIN_PREF_RSSI = 1,
+ BRCMF_JOIN_PREF_WPA,
+ BRCMF_JOIN_PREF_BAND,
+ BRCMF_JOIN_PREF_RSSI_DELTA,
+};
+
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
BRCMF_FIL_P2P_IF_GO,
@@ -282,6 +290,22 @@ struct brcmf_assoc_params_le {
__le16 chanspec_list[1];
};
+/**
+ * struct brcmf_join_pref_params - parameters for preferred join selection.
+ *
+ * @type: preference type (see enum brcmf_join_pref_types).
+ * @len: length of bytes following (currently always 2).
+ * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
+ * @band: band to which selection preference applies.
+ * This is used if @type is BAND or RSSI_DELTA.
+ */
+struct brcmf_join_pref_params {
+ u8 type;
+ u8 len;
+ u8 rssi_gain;
+ u8 band;
+};
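
A sketch of how these entries might be pushed to firmware via the join_pref iovar, assuming the existing brcmf_fil_iovar_data_set() helper; the two-element preference list and the WLC_BAND_5G band code are illustrative:

    /* illustrative join preference setup (values assumed) */
    static int my_set_join_pref(struct brcmf_if *ifp)
    {
        struct brcmf_join_pref_params join_pref[2];

        /* prefer the 5 GHz band first... */
        join_pref[0].type = BRCMF_JOIN_PREF_BAND;
        join_pref[0].len = 2;
        join_pref[0].rssi_gain = 0;
        join_pref[0].band = WLC_BAND_5G;

        /* ...then fall back to strongest RSSI */
        join_pref[1].type = BRCMF_JOIN_PREF_RSSI;
        join_pref[1].len = 2;
        join_pref[1].rssi_gain = 0;
        join_pref[1].band = 0;

        return brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref,
                                        sizeof(join_pref));
    }
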
+
/* used for join with or without a specific bssid and channel list */
struct brcmf_join_params {
struct brcmf_ssid_le ssid_le;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index c3e7d76dbf35..699908de314a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -476,6 +476,7 @@ struct brcmf_fws_info {
bool bus_flow_blocked;
bool creditmap_received;
u8 mode;
+ bool avoid_queueing;
};
/*
@@ -1369,13 +1370,12 @@ done:
}
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
- struct sk_buff *skb, u32 genbit,
- u16 seq)
+ struct sk_buff *skb, u8 ifidx,
+ u32 genbit, u16 seq)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u32 hslot;
int ret;
- u8 ifidx;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
@@ -1389,29 +1389,21 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
entry->generation = genbit;
- ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
- if (ret == 0) {
- brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
- brcmf_skbcb(skb)->htod_seq = seq;
- if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
- brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
- } else {
- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
- }
- ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
- skb);
+ brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+ brcmf_skbcb(skb)->htod_seq = seq;
+ if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+ brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+ } else {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
}
+ ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
if (ret != 0) {
- /* suppress q is full or hdrpull failed, drop this packet */
- brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
- true);
+ /* suppress q is full, drop this packet */
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
} else {
- /*
- * Mark suppressed to avoid a double free during
- * wlfc cleanup
- */
+ /* Mark suppressed to avoid a double free during wlfc cleanup */
brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
}
@@ -1428,6 +1420,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
struct sk_buff *skb;
struct brcmf_skbuff_cb *skcb;
struct brcmf_fws_mac_descriptor *entry = NULL;
+ u8 ifidx;
brcmf_dbg(DATA, "flags %d\n", flags);
@@ -1476,12 +1469,15 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
}
brcmf_fws_macdesc_return_req_credit(skb);
+ if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
+ brcmu_pkt_buf_free_skb(skb);
+ return -EINVAL;
+ }
if (!remove_from_hanger)
- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
- seq);
-
+ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
+ genbit, seq);
if (remove_from_hanger || ret)
- brcmf_txfinalize(fws->drvr, skb, true);
+ brcmf_txfinalize(fws->drvr, skb, ifidx, true);
return 0;
}
@@ -1868,7 +1864,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
struct ethhdr *eh = (struct ethhdr *)(skb->data);
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
- bool pae = eh->h_proto == htons(ETH_P_PAE);
+ int rc = 0;
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
@@ -1876,8 +1872,13 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
skb->priority = cfg80211_classify8021d(skb, NULL);
drvr->tx_multicast += !!multicast;
- if (pae)
- atomic_inc(&ifp->pend_8021x_cnt);
+
+ if (fws->avoid_queueing) {
+ rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
+ if (rc < 0)
+ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+ return rc;
+ }
/* set control buffer information */
skcb->if_flags = 0;
@@ -1899,15 +1900,12 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_schedule_deq(fws);
} else {
brcmf_err("drop skb: no hanger slot\n");
- if (pae) {
- atomic_dec(&ifp->pend_8021x_cnt);
- if (waitqueue_active(&ifp->pend_8021x_wait))
- wake_up(&ifp->pend_8021x_wait);
- }
- brcmu_pkt_buf_free_skb(skb);
+ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+ rc = -ENOMEM;
}
brcmf_fws_unlock(fws);
- return 0;
+
+ return rc;
}
void brcmf_fws_reset_interface(struct brcmf_if *ifp)
@@ -1982,7 +1980,8 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
brcmf_fws_lock(fws);
if (ret < 0)
- brcmf_txfinalize(drvr, skb, false);
+ brcmf_txfinalize(drvr, skb, ifidx,
+ false);
if (fws->bus_flow_blocked)
break;
}
@@ -2039,6 +2038,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
fws->drvr = drvr;
fws->fcmode = fcmode;
+ if (!drvr->bus_if->always_use_fws_queue &&
+ (fcmode == BRCMF_FWS_FCMODE_NONE)) {
+ fws->avoid_queueing = true;
+ brcmf_dbg(INFO, "FWS queueing will be avoided\n");
+ return 0;
+ }
+
fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
if (fws->fws_wq == NULL) {
brcmf_err("workqueue creation failed\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
deleted file mode 100644
index d5ef86db631b..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-
-#include "nvram.h"
-
-/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
- * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
- * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
- * End of buffer is completed with token identifying length of buffer.
- */
-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
-{
- u8 *nvram;
- u32 i;
- u32 len;
- u32 column;
- u8 val;
- bool comment;
- u32 token;
- __le32 token_le;
-
- /* Alloc for extra 0 byte + roundup by 4 + length field */
- nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
- if (!nvram)
- return NULL;
-
- len = 0;
- column = 0;
- comment = false;
- for (i = 0; i < nv->size; i++) {
- val = nv->data[i];
- if (val == 0)
- break;
- if (val == '\r')
- continue;
- if (comment && (val != '\n'))
- continue;
- comment = false;
- if (val == '#') {
- comment = true;
- continue;
- }
- if (val == '\n') {
- if (column == 0)
- continue;
- nvram[len] = 0;
- len++;
- column = 0;
- continue;
- }
- nvram[len] = val;
- len++;
- column++;
- }
- column = len;
- *new_length = roundup(len + 1, 4);
- while (column != *new_length) {
- nvram[column] = 0;
- column++;
- }
-
- token = *new_length / 4;
- token = (~token << 16) | (token & 0x0000FFFF);
- token_le = cpu_to_le32(token);
-
- memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
- *new_length += sizeof(token_le);
-
- return nvram;
-}
-
-void brcmf_nvram_free(void *nvram)
-{
- kfree(nvram);
-}
-
-
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 24f65cd53859..6db51a666f61 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -25,6 +25,7 @@
#include <dhd_bus.h>
#include <dhd_dbg.h>
+#include "firmware.h"
#include "usb_rdl.h"
#include "usb.h"
@@ -61,12 +62,6 @@ struct brcmf_usb_image {
u8 *image;
int image_len;
};
-static struct list_head fw_image_list;
-
-struct intr_transfer_buf {
- u32 notification;
- u32 reserved;
-};
struct brcmf_usbdev_info {
struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
@@ -75,7 +70,7 @@ struct brcmf_usbdev_info {
struct list_head rx_postq;
struct list_head tx_freeq;
struct list_head tx_postq;
- uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
+ uint rx_pipe, tx_pipe, rx_pipe2;
int rx_low_watermark;
int tx_low_watermark;
@@ -87,7 +82,7 @@ struct brcmf_usbdev_info {
struct brcmf_usbreq *tx_reqs;
struct brcmf_usbreq *rx_reqs;
- u8 *image; /* buffer for combine fw and nvram */
+ const u8 *image; /* buffer for combine fw and nvram */
int image_len;
struct usb_device *usbdev;
@@ -104,10 +99,6 @@ struct brcmf_usbdev_info {
ulong ctl_op;
struct urb *bulk_urb; /* used for FW download */
- struct urb *intr_urb; /* URB for interrupt endpoint */
- int intr_size; /* Size of interrupt message */
- int interval; /* Interrupt polling interval */
- struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
};
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -531,39 +522,6 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
}
}
-static void
-brcmf_usb_intr_complete(struct urb *urb)
-{
- struct brcmf_usbdev_info *devinfo =
- (struct brcmf_usbdev_info *)urb->context;
- int err;
-
- brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
-
- if (devinfo == NULL)
- return;
-
- if (unlikely(urb->status)) {
- if (urb->status == -ENOENT ||
- urb->status == -ESHUTDOWN ||
- urb->status == -ENODEV) {
- brcmf_usb_state_change(devinfo,
- BRCMFMAC_USB_STATE_DOWN);
- }
- }
-
- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
- brcmf_err("intr cb when DBUS down, ignoring\n");
- return;
- }
-
- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
- err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
- if (err)
- brcmf_err("usb_submit_urb, err=%d\n", err);
- }
-}
-
static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
@@ -619,7 +577,6 @@ static int brcmf_usb_up(struct device *dev)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
u16 ifnum;
- int ret;
brcmf_dbg(USB, "Enter\n");
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
@@ -628,23 +585,6 @@ static int brcmf_usb_up(struct device *dev)
/* Success, indicate devinfo is fully up */
brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
- if (devinfo->intr_urb) {
- usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
- devinfo->intr_pipe,
- &devinfo->intr,
- devinfo->intr_size,
- (usb_complete_t)brcmf_usb_intr_complete,
- devinfo,
- devinfo->interval);
-
- ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
- if (ret) {
- brcmf_err("USB_SUBMIT_URB failed with status %d\n",
- ret);
- return -EINVAL;
- }
- }
-
if (devinfo->ctl_urb) {
devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
@@ -681,8 +621,6 @@ static void brcmf_usb_down(struct device *dev)
return;
brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
- if (devinfo->intr_urb)
- usb_kill_urb(devinfo->intr_urb);
if (devinfo->ctl_urb)
usb_kill_urb(devinfo->ctl_urb);
@@ -1021,7 +959,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
}
err = brcmf_usb_dlstart(devinfo,
- devinfo->image, devinfo->image_len);
+ (u8 *)devinfo->image, devinfo->image_len);
if (err == 0)
err = brcmf_usb_dlrun(devinfo);
return err;
@@ -1036,7 +974,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
brcmf_usb_free_q(&devinfo->rx_freeq, false);
brcmf_usb_free_q(&devinfo->tx_freeq, false);
- usb_free_urb(devinfo->intr_urb);
usb_free_urb(devinfo->ctl_urb);
usb_free_urb(devinfo->bulk_urb);
@@ -1080,68 +1017,20 @@ static int check_file(const u8 *headers)
return -1;
}
-static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
+static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
{
- s8 *fwname;
- const struct firmware *fw;
- struct brcmf_usb_image *fw_image;
- int err;
-
- brcmf_dbg(USB, "Enter\n");
switch (devinfo->bus_pub.devid) {
case 43143:
- fwname = BRCMF_USB_43143_FW_NAME;
- break;
+ return BRCMF_USB_43143_FW_NAME;
case 43235:
case 43236:
case 43238:
- fwname = BRCMF_USB_43236_FW_NAME;
- break;
+ return BRCMF_USB_43236_FW_NAME;
case 43242:
- fwname = BRCMF_USB_43242_FW_NAME;
- break;
+ return BRCMF_USB_43242_FW_NAME;
default:
- return -EINVAL;
- break;
- }
- brcmf_dbg(USB, "Loading FW %s\n", fwname);
- list_for_each_entry(fw_image, &fw_image_list, list) {
- if (fw_image->fwname == fwname) {
- devinfo->image = fw_image->image;
- devinfo->image_len = fw_image->image_len;
- return 0;
- }
- }
- /* fw image not yet loaded. Load it now and add to list */
- err = request_firmware(&fw, fwname, devinfo->dev);
- if (!fw) {
- brcmf_err("fail to request firmware %s\n", fwname);
- return err;
- }
- if (check_file(fw->data) < 0) {
- brcmf_err("invalid firmware %s\n", fwname);
- return -EINVAL;
+ return NULL;
}
-
- fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
- if (!fw_image)
- return -ENOMEM;
- INIT_LIST_HEAD(&fw_image->list);
- list_add_tail(&fw_image->list, &fw_image_list);
- fw_image->fwname = fwname;
- fw_image->image = vmalloc(fw->size);
- if (!fw_image->image)
- return -ENOMEM;
-
- memcpy(fw_image->image, fw->data, fw->size);
- fw_image->image_len = fw->size;
-
- release_firmware(fw);
-
- devinfo->image = fw_image->image;
- devinfo->image_len = fw_image->image_len;
-
- return 0;
}
@@ -1186,11 +1075,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
goto error;
devinfo->tx_freecount = ntxq;
- devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!devinfo->intr_urb) {
- brcmf_err("usb_alloc_urb (intr) failed\n");
- goto error;
- }
devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->ctl_urb) {
brcmf_err("usb_alloc_urb (ctl) failed\n");
@@ -1202,16 +1086,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
goto error;
}
- if (!brcmf_usb_dlneeded(devinfo))
- return &devinfo->bus_pub;
-
- brcmf_dbg(USB, "Start fw downloading\n");
- if (brcmf_usb_get_fw(devinfo))
- goto error;
-
- if (brcmf_usb_fw_download(devinfo))
- goto error;
-
return &devinfo->bus_pub;
error:
@@ -1222,18 +1096,77 @@ error:
static struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
- .init = brcmf_usb_up,
.stop = brcmf_usb_down,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
};
+static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
+{
+ int ret;
+
+ /* Attach to the common driver interface */
+ ret = brcmf_attach(devinfo->dev);
+ if (ret) {
+ brcmf_err("brcmf_attach failed\n");
+ return ret;
+ }
+
+ ret = brcmf_usb_up(devinfo->dev);
+ if (ret)
+ goto fail;
+
+ ret = brcmf_bus_start(devinfo->dev);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ brcmf_detach(devinfo->dev);
+ return ret;
+}
+
+static void brcmf_usb_probe_phase2(struct device *dev,
+ const struct firmware *fw,
+ void *nvram, u32 nvlen)
+{
+ struct brcmf_bus *bus = dev_get_drvdata(dev);
+ struct brcmf_usbdev_info *devinfo;
+ int ret;
+
+ brcmf_dbg(USB, "Start fw downloading\n");
+ ret = check_file(fw->data);
+ if (ret < 0) {
+ brcmf_err("invalid firmware\n");
+ release_firmware(fw);
+ goto error;
+ }
+
+ devinfo = bus->bus_priv.usb->devinfo;
+ devinfo->image = fw->data;
+ devinfo->image_len = fw->size;
+
+ ret = brcmf_usb_fw_download(devinfo);
+ release_firmware(fw);
+ if (ret)
+ goto error;
+
+ ret = brcmf_usb_bus_setup(devinfo);
+ if (ret)
+ goto error;
+
+ return;
+error:
+ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+ device_release_driver(dev);
+}
+
static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
{
struct brcmf_bus *bus = NULL;
struct brcmf_usbdev *bus_pub = NULL;
- int ret;
struct device *dev = devinfo->dev;
+ int ret;
brcmf_dbg(USB, "Enter\n");
bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
@@ -1254,22 +1187,18 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
bus->proto_type = BRCMF_PROTO_BCDC;
+ bus->always_use_fws_queue = true;
- /* Attach to the common driver interface */
- ret = brcmf_attach(dev);
- if (ret) {
- brcmf_err("brcmf_attach failed\n");
- goto fail;
- }
-
- ret = brcmf_bus_start(dev);
- if (ret) {
- brcmf_err("dongle is not responding\n");
- brcmf_detach(dev);
- goto fail;
+ if (!brcmf_usb_dlneeded(devinfo)) {
+ ret = brcmf_usb_bus_setup(devinfo);
+ if (ret)
+ goto fail;
}
-
+ /* request firmware here */
+ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
+ brcmf_usb_probe_phase2);
return 0;
+
fail:
/* Release resources in reverse order */
kfree(bus);
@@ -1357,9 +1286,6 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail;
}
- endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
- devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
-
devinfo->rx_pipe = 0;
devinfo->rx_pipe2 = 0;
devinfo->tx_pipe = 0;
@@ -1391,16 +1317,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
}
- /* Allocate interrupt URB and data buffer */
- /* RNDIS says 8-byte intr, our old drivers used 4-byte */
- if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
- devinfo->intr_size = 8;
- else
- devinfo->intr_size = 4;
-
- devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
-
- if (usb->speed == USB_SPEED_HIGH)
+ if (usb->speed == USB_SPEED_SUPER)
+ brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
+ else if (usb->speed == USB_SPEED_HIGH)
brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
else
brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
@@ -1455,23 +1374,18 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- if (!brcmf_attach(devinfo->dev))
- return brcmf_bus_start(&usb->dev);
-
- return 0;
+ return brcmf_usb_bus_setup(devinfo);
}
static int brcmf_usb_reset_resume(struct usb_interface *intf)
{
struct usb_device *usb = interface_to_usbdev(intf);
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
-
brcmf_dbg(USB, "Enter\n");
- if (!brcmf_usb_fw_download(devinfo))
- return brcmf_usb_resume(intf);
-
- return -EIO;
+ return brcmf_fw_get_firmwares(&usb->dev, 0,
+ brcmf_usb_get_fwname(devinfo), NULL,
+ brcmf_usb_probe_phase2);
}
#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
@@ -1506,16 +1420,6 @@ static struct usb_driver brcmf_usbdrvr = {
.disable_hub_initiated_lpm = 1,
};
-static void brcmf_release_fw(struct list_head *q)
-{
- struct brcmf_usb_image *fw_image, *next;
-
- list_for_each_entry_safe(fw_image, next, q, list) {
- vfree(fw_image->image);
- list_del_init(&fw_image->list);
- }
-}
-
static int brcmf_usb_reset_device(struct device *dev, void *notused)
{
/* device passed is the usb interface so we
@@ -1534,12 +1438,10 @@ void brcmf_usb_exit(void)
ret = driver_for_each_device(drv, NULL, NULL,
brcmf_usb_reset_device);
usb_deregister(&brcmf_usbdrvr);
- brcmf_release_fw(&fw_image_list);
}
void brcmf_usb_register(void)
{
brcmf_dbg(USB, "Enter\n");
- INIT_LIST_HEAD(&fw_image_list);
usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index be1985296bdc..d8fa276e368b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -221,9 +221,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
*/
REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
/* IEEE 802.11a, channel 36..64 */
- REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+ REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
/* IEEE 802.11a, channel 100..165 */
- REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+ REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
};
static const u32 __wl_cipher_suites[] = {
@@ -341,6 +341,60 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
+static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
+ struct cfg80211_chan_def *ch)
+{
+ struct brcmu_chan ch_inf;
+ s32 primary_offset;
+
+ brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
+ ch->chan->center_freq, ch->center_freq1, ch->width);
+ ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
+ primary_offset = ch->center_freq1 - ch->chan->center_freq;
+ switch (ch->width) {
+ case NL80211_CHAN_WIDTH_20:
+ ch_inf.bw = BRCMU_CHAN_BW_20;
+ WARN_ON(primary_offset != 0);
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ ch_inf.bw = BRCMU_CHAN_BW_40;
+ if (primary_offset < 0)
+ ch_inf.sb = BRCMU_CHAN_SB_U;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_L;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_inf.bw = BRCMU_CHAN_BW_80;
+ if (primary_offset < 0) {
+ if (primary_offset < -CH_10MHZ_APART * 5) /* offset is in MHz */
+ ch_inf.sb = BRCMU_CHAN_SB_UU;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_UL;
+ } else {
+ if (primary_offset > CH_10MHZ_APART * 5) /* offset is in MHz */
+ ch_inf.sb = BRCMU_CHAN_SB_LL;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_LU;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ switch (ch->chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ ch_inf.band = BRCMU_CHAN_BAND_2G;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ ch_inf.band = BRCMU_CHAN_BAND_5G;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ d11inf->encchspec(&ch_inf);
+
+ return ch_inf.chspec;
+}
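
A worked example for the 40 MHz branch: a chandef with control channel 36 (5180 MHz) and center_freq1 of 5190 MHz gives primary_offset = +10, so the primary channel sits on the lower side. The resulting brcmu_chan, with hypothetical values, before encchspec() runs:

    /* worked 40 MHz example (chandef values hypothetical) */
    struct brcmu_chan ch_inf = {
        .chnum = 38,                 /* ieee80211_frequency_to_channel(5190) */
        .bw    = BRCMU_CHAN_BW_40,
        .sb    = BRCMU_CHAN_SB_L,    /* primary_offset = 5190 - 5180 = +10 > 0 */
        .band  = BRCMU_CHAN_BAND_5G,
    };
    /* d11inf->encchspec(&ch_inf) then fills ch_inf.chspec for the firmware */
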
+
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch)
{
@@ -586,6 +640,9 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
if (err)
brcmf_err("Scan abort failed\n");
}
+
+ brcmf_set_mpc(ifp, 1);
+
/*
* e-scan can be initiated by scheduled scan
* which takes precedence.
@@ -595,12 +652,10 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
cfg->sched_escan = false;
if (!aborted)
cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
- brcmf_set_mpc(ifp, 1);
} else if (scan_request) {
brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
aborted ? "Aborted" : "Done");
cfg80211_scan_done(scan_request, aborted);
- brcmf_set_mpc(ifp, 1);
}
if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@ -1236,8 +1291,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
params->chandef.chan->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- chanspec = channel_to_chanspec(&cfg->d11inf,
- params->chandef.chan);
+ chanspec = chandef_to_chanspec(&cfg->d11inf,
+ &params->chandef);
join_params.params_le.chanspec_list[0] =
cpu_to_le16(chanspec);
join_params.params_le.chanspec_num = cpu_to_le32(1);
@@ -2182,7 +2237,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
@@ -3124,7 +3179,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
}
if (!request->n_ssids || !request->n_match_sets) {
- brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
+ brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
request->n_ssids);
return -EINVAL;
}
@@ -3734,23 +3789,6 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
}
static s32
-brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
- struct brcmf_if *ifp,
- struct ieee80211_channel *channel)
-{
- u16 chanspec;
- s32 err;
-
- brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
- channel->center_freq);
-
- chanspec = channel_to_chanspec(&cfg->d11inf, channel);
- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
-
- return err;
-}
-
-static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
{
@@ -3765,11 +3803,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
+ u16 chanspec;
- brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
- cfg80211_get_chandef_type(&settings->chandef),
- settings->beacon_interval,
- settings->dtim_period);
+ brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
+ settings->chandef.chan->hw_value,
+ settings->chandef.center_freq1, settings->chandef.width,
+ settings->beacon_interval, settings->dtim_period);
brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
@@ -3826,9 +3865,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
- err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
+ chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
- brcmf_err("Set Channel failed, %d\n", err);
+ brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
goto exit;
}
@@ -3975,7 +4015,7 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
- u8 *mac)
+ const u8 *mac)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
@@ -4203,7 +4243,7 @@ static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
}
static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
- struct net_device *ndev, u8 *peer,
+ struct net_device *ndev, const u8 *peer,
enum nl80211_tdls_operation oper)
{
struct brcmf_if *ifp;
@@ -4364,6 +4404,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_SUPPORTS_TDLS;
+ if (!brcmf_roamoff)
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
@@ -4685,7 +4727,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct ieee80211_channel *chan;
s32 err = 0;
- u16 reason;
if (brcmf_is_apmode(ifp->vif)) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
@@ -4706,16 +4747,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
- if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state)) {
- reason = 0;
- if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
- (e->event_code == BRCMF_E_DISASSOC_IND)) &&
- (e->reason != WLAN_REASON_UNSPECIFIED))
- reason = e->reason;
- cfg80211_disconnected(ndev, reason, NULL, 0,
- GFP_KERNEL);
- }
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
@@ -5215,6 +5246,9 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
ch.bw == BRCMU_CHAN_BW_40)
continue;
+ if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_80)
+ continue;
update = false;
for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
if (band_chan_arr[j].hw_value == ch.chnum) {
@@ -5231,10 +5265,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- if (ch.bw == BRCMU_CHAN_BW_40) {
- /* assuming the order is HT20, HT40 Upper,
- * HT40 lower from chanspecs
- */
+ /* assuming the chanspecs order is HT20,
+ * HT40 upper, HT40 lower, and VHT80.
+ */
+ if (ch.bw == BRCMU_CHAN_BW_80) {
+ band_chan_arr[index].flags &=
+ ~IEEE80211_CHAN_NO_80MHZ;
+ } else if (ch.bw == BRCMU_CHAN_BW_40) {
ht40_flag = band_chan_arr[index].flags &
IEEE80211_CHAN_NO_HT40;
if (ch.sb == BRCMU_CHAN_SB_U) {
@@ -5255,8 +5292,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
IEEE80211_CHAN_NO_HT40MINUS;
}
} else {
+ /* disable other bandwidths for now as the mentioned
+ * order assures they are enabled for subsequent
+ * chanspecs.
+ */
band_chan_arr[index].flags =
- IEEE80211_CHAN_NO_HT40;
+ IEEE80211_CHAN_NO_HT40 |
+ IEEE80211_CHAN_NO_80MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
channel = ch.chspec;
@@ -5323,13 +5365,63 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
}
}
+static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
+ u32 bw_cap[2], u32 nchain)
+{
+ band->ht_cap.ht_supported = true;
+ if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
+{
+ u16 mcs_map;
+ int i;
+
+ for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
+ mcs_map = (mcs_map << 2) | supp;
+
+ return cpu_to_le16(mcs_map);
+}
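
Worked example: with nchain == 2 and IEEE80211_VHT_MCS_SUPPORT_0_9 (value 2), two iterations of the shift-and-or yield 0xfffa, i.e. chains 1 and 2 advertise MCS 0-9 while the remaining six chain slots keep IEEE80211_VHT_MCS_NOT_SUPPORTED (3). A standalone sketch of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* host-side mirror of the brcmf_get_mcs_map() loop */
    int main(void)
    {
        uint16_t mcs_map = 0xFFFF;  /* all chains start as "not supported" */
        unsigned int nchain = 2;    /* hypothetical rx chain count */
        unsigned int supp = 2;      /* IEEE80211_VHT_MCS_SUPPORT_0_9 */
        unsigned int i;

        for (i = 0; i < nchain; i++)
            mcs_map = (mcs_map << 2) | supp;

        printf("mcs_map = 0x%04x\n", mcs_map);  /* prints 0xfffa */
        return 0;
    }
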
+
+static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
+ u32 bw_cap[2], u32 nchain)
+{
+ __le16 mcs_map;
+
+ /* not allowed in 2.4G band */
+ if (band->band == IEEE80211_BAND_2GHZ)
+ return;
+
+ band->vht_cap.vht_supported = true;
+ /* 80MHz is mandatory */
+ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
+ if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
+ band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
+ }
+ /* all support 256-QAM */
+ mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
+ band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
+ band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
+}
+
static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct wiphy *wiphy;
s32 phy_list;
u32 band_list[3];
- u32 nmode;
+ u32 nmode = 0;
+ u32 vhtmode = 0;
u32 bw_cap[2] = { 0, 0 };
u32 rxchain;
u32 nchain;
@@ -5360,14 +5452,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
band_list[0], band_list[1], band_list[2]);
+ (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
if (err) {
brcmf_err("nmode error (%d)\n", err);
} else {
brcmf_get_bwcap(ifp, bw_cap);
}
- brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
- bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
+ brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
+ nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
+ bw_cap[IEEE80211_BAND_5GHZ]);
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
@@ -5398,17 +5492,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
else
continue;
- if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
- band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- }
- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
- band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
- band->ht_cap.ht_supported = true;
- band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
- memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
- band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (nmode)
+ brcmf_update_ht_cap(band, bw_cap, nchain);
+ if (vhtmode)
+ brcmf_update_vht_cap(band, bw_cap, nchain);
bands[band->band] = band;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 8c5fa4e58139..43c71bfaa474 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
return result;
}
-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct brcms_info *wl = hw->priv;
int ret;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9417cb5a2553..af8ba64ace39 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4870,14 +4870,11 @@ static void brcms_c_detach_module(struct brcms_c_info *wlc)
/*
* low level detach
*/
-static int brcms_b_detach(struct brcms_c_info *wlc)
+static void brcms_b_detach(struct brcms_c_info *wlc)
{
uint i;
struct brcms_hw_band *band;
struct brcms_hardware *wlc_hw = wlc->hw;
- int callbacks;
-
- callbacks = 0;
brcms_b_detach_dmapio(wlc_hw);
@@ -4900,9 +4897,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
ai_detach(wlc_hw->sih);
wlc_hw->sih = NULL;
}
-
- return callbacks;
-
}
/*
@@ -4917,14 +4911,15 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
*/
uint brcms_c_detach(struct brcms_c_info *wlc)
{
- uint callbacks = 0;
+ uint callbacks;
if (wlc == NULL)
return 0;
- callbacks += brcms_b_detach(wlc);
+ brcms_b_detach(wlc);
/* delete software timers */
+ callbacks = 0;
if (!brcms_c_radio_monitor_stop(wlc))
callbacks++;
diff --git a/drivers/net/wireless/brcm80211/brcmutil/d11.c b/drivers/net/wireless/brcm80211/brcmutil/d11.c
index 30e54e2c6c9b..2b2522bdd8eb 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/d11.c
@@ -21,19 +21,46 @@
#include <brcmu_wifi.h>
#include <brcmu_d11.h>
-static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
+static u16 d11n_sb(enum brcmu_chan_sb sb)
{
- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
+ switch (sb) {
+ case BRCMU_CHAN_SB_NONE:
+ return BRCMU_CHSPEC_D11N_SB_N;
+ case BRCMU_CHAN_SB_L:
+ return BRCMU_CHSPEC_D11N_SB_L;
+ case BRCMU_CHAN_SB_U:
+ return BRCMU_CHSPEC_D11N_SB_U;
+ default:
+ WARN_ON(1);
+ }
+ return 0;
+}
- switch (ch->bw) {
+static u16 d11n_bw(enum brcmu_chan_bw bw)
+{
+ switch (bw) {
case BRCMU_CHAN_BW_20:
- ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N;
- break;
+ return BRCMU_CHSPEC_D11N_BW_20;
case BRCMU_CHAN_BW_40:
+ return BRCMU_CHSPEC_D11N_BW_40;
default:
- WARN_ON_ONCE(1);
- break;
+ WARN_ON(1);
}
+ return 0;
+}
+
+static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
+{
+ if (ch->bw == BRCMU_CHAN_BW_20)
+ ch->sb = BRCMU_CHAN_SB_NONE;
+
+ ch->chspec = 0;
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
+ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
+ 0, d11n_sb(ch->sb));
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
+ 0, d11n_bw(ch->bw));
if (ch->chnum <= CH_MAX_2G_CHANNEL)
ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
@@ -41,23 +68,34 @@ static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
}
-static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
+static u16 d11ac_bw(enum brcmu_chan_bw bw)
{
- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
-
- switch (ch->bw) {
+ switch (bw) {
case BRCMU_CHAN_BW_20:
- ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20;
- break;
+ return BRCMU_CHSPEC_D11AC_BW_20;
case BRCMU_CHAN_BW_40:
+ return BRCMU_CHSPEC_D11AC_BW_40;
case BRCMU_CHAN_BW_80:
- case BRCMU_CHAN_BW_80P80:
- case BRCMU_CHAN_BW_160:
+ return BRCMU_CHSPEC_D11AC_BW_80;
default:
- WARN_ON_ONCE(1);
- break;
+ WARN_ON(1);
}
+ return 0;
+}
+static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
+{
+ if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
+ ch->sb = BRCMU_CHAN_SB_L;
+
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
+ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+ BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
+ 0, d11ac_bw(ch->bw));
+
+ ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
if (ch->chnum <= CH_MAX_2G_CHANNEL)
ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
else
@@ -73,6 +111,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
case BRCMU_CHSPEC_D11N_BW_20:
ch->bw = BRCMU_CHAN_BW_20;
+ ch->sb = BRCMU_CHAN_SB_NONE;
break;
case BRCMU_CHSPEC_D11N_BW_40:
ch->bw = BRCMU_CHAN_BW_40;
@@ -112,6 +151,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
case BRCMU_CHSPEC_D11AC_BW_20:
ch->bw = BRCMU_CHAN_BW_20;
+ ch->sb = BRCMU_CHAN_SB_NONE;
break;
case BRCMU_CHSPEC_D11AC_BW_40:
ch->bw = BRCMU_CHAN_BW_40;
@@ -128,6 +168,25 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
break;
case BRCMU_CHSPEC_D11AC_BW_80:
ch->bw = BRCMU_CHAN_BW_80;
+ ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+ BRCMU_CHSPEC_D11AC_SB_SHIFT);
+ switch (ch->sb) {
+ case BRCMU_CHAN_SB_LL:
+ ch->chnum -= CH_30MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_LU:
+ ch->chnum -= CH_10MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_UL:
+ ch->chnum += CH_10MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_UU:
+ ch->chnum += CH_30MHZ_APART;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
break;
case BRCMU_CHSPEC_D11AC_BW_8080:
case BRCMU_CHSPEC_D11AC_BW_160:
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 8660a2cba098..f9745ea8b3e0 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -108,13 +108,7 @@ enum brcmu_chan_bw {
};
enum brcmu_chan_sb {
- BRCMU_CHAN_SB_NONE = 0,
- BRCMU_CHAN_SB_L,
- BRCMU_CHAN_SB_U,
- BRCMU_CHAN_SB_LL,
- BRCMU_CHAN_SB_LU,
- BRCMU_CHAN_SB_UL,
- BRCMU_CHAN_SB_UU,
+ BRCMU_CHAN_SB_NONE = -1,
BRCMU_CHAN_SB_LLL,
BRCMU_CHAN_SB_LLU,
BRCMU_CHAN_SB_LUL,
@@ -123,6 +117,12 @@ enum brcmu_chan_sb {
BRCMU_CHAN_SB_ULU,
BRCMU_CHAN_SB_UUL,
BRCMU_CHAN_SB_UUU,
+ BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
+ BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
+ BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
+ BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
+ BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
+ BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
};
struct brcmu_chan {
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 74419d4bd123..76b5d3a86294 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -29,6 +29,7 @@
#define CH_UPPER_SB 0x01
#define CH_LOWER_SB 0x02
#define CH_EWA_VALID 0x04
+#define CH_30MHZ_APART 6
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 103f7bce8932..cd0cad7f7759 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
return ret;
}
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct cw1200_common *priv = hw->priv;
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index 35babb62cc6a..b7e386b7662b 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 67db34e56d7e..52919ad42726 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -882,7 +882,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
dev->mtu = local->mtu;
- SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
+ dev->ethtool_ops = &prism2_ethtool_ops;
}
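This follows the tree-wide removal of the SET_ETHTOOL_OPS() macro: the ops table is const and the pointer is simply assigned. The generic pattern, with a hypothetical ops table and callback:

static const struct ethtool_ops my_ethtool_ops = {
	.get_drvinfo	= my_get_drvinfo,	/* hypothetical */
};

static void my_setup(struct net_device *dev)
{
	dev->ethtool_ops = &my_ethtool_ops;	/* was SET_ETHTOOL_OPS(dev, &...) */
}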
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index d37a6fd90d40..b598e2803500 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.flag |= RX_FLAG_SHORTPRE;
if ((unlikely(rx_stats->phy_count > 20))) {
- D_DROP("dsp size out of range [0,20]: %d/n",
+ D_DROP("dsp size out of range [0,20]: %d\n",
rx_stats->phy_count);
return;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 888ad5c74639..c159c05db6ef 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- D_DROP("dsp size out of range [0,20]: %d/n",
+ D_DROP("dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return;
}
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 4f42174d9994..ecc674627e6e 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4755,7 +4755,8 @@ out:
}
EXPORT_SYMBOL(il_mac_change_interface);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct il_priv *il = hw->priv;
unsigned long timeout = jiffies + msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index dfb13c70efe8..ea5c0f863c4e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 74b3b4de7bb7..7fd50428b934 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -2,10 +2,6 @@ config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
depends on PCI && MAC80211 && HAS_IOMEM
select FW_LOADER
- select NEW_LEDS
- select LEDS_CLASS
- select LEDS_TRIGGERS
- select MAC80211_LEDS
---help---
Select to build the driver supporting the:
@@ -43,6 +39,14 @@ config IWLWIFI
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwlwifi.
+config IWLWIFI_LEDS
+ bool
+ depends on IWLWIFI
+ depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
+ default y
+
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
depends on IWLWIFI
@@ -124,7 +128,6 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
-
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
index dce7ab2e0c4b..4d19685f31c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -4,9 +4,10 @@ iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
iwldvm-objs += power.o
-iwldvm-objs += scan.o led.o
+iwldvm-objs += scan.o
iwldvm-objs += rxon.o devices.o
+iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index be1086c87157..20e6aa910700 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -94,7 +94,6 @@ int iwl_send_calib_results(struct iwl_priv *priv)
{
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
- .flags = CMD_SYNC,
};
struct iwl_calib_result *res;
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d2fe2596d54e..0ffb6ff1a255 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -1481,7 +1481,7 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
/* make request to uCode to retrieve statistics information */
mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
+ ret = iwl_send_statistics_request(priv, 0, false);
mutex_unlock(&priv->mutex);
if (ret)
@@ -1868,7 +1868,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
/* make request to uCode to retrieve statistics information */
mutex_lock(&priv->mutex);
- iwl_send_statistics_request(priv, CMD_SYNC, true);
+ iwl_send_statistics_request(priv, 0, true);
mutex_unlock(&priv->mutex);
return count;
@@ -2188,7 +2188,6 @@ static int iwl_cmd_echo_test(struct iwl_priv *priv)
struct iwl_host_cmd cmd = {
.id = REPLY_ECHO,
.len = { 0 },
- .flags = CMD_SYNC,
};
ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -2320,7 +2319,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
mutex_lock(&priv->mutex);
/* take the return value to make compiler happy - it will fail anyway */
- ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL);
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);
mutex_unlock(&priv->mutex);
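The CMD_SYNC deletions that repeat through the rest of this series all encode one rule: synchronous delivery is now the default, so 0 (or an unset .flags) replaces CMD_SYNC everywhere. Assuming CMD_ASYNC remains the explicit opt-in, the two submission styles look like:

struct iwl_host_cmd cmd = {
	.id = REPLY_ECHO,
	/* .flags left at 0: synchronous, blocks for the response */
};
int ret = iwl_dvm_send_cmd(priv, &cmd);

cmd.flags = CMD_ASYNC;			/* fire-and-forget variant */
ret = iwl_dvm_send_cmd(priv, &cmd);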
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3441f70d0ff9..a6f22c32a279 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -888,9 +888,11 @@ struct iwl_priv {
struct iwl_event_log event_log;
+#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
+#endif
/* WoWLAN GTK rekey data */
u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 758c54eeb206..34b41e5f7cfc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -417,7 +417,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = { sizeof(cmd), },
- .flags = CMD_SYNC,
.data = { &cmd, },
};
@@ -579,7 +578,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = { sizeof(*cmd), },
- .flags = CMD_SYNC,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int err;
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 6a0817d9c4fa..1c6b2252d0f2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -36,8 +36,20 @@ struct iwl_priv;
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
+#ifdef CONFIG_IWLWIFI_LEDS
void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);
+#else
+static inline void iwlagn_led_enable(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_init(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_exit(struct iwl_priv *priv)
+{
+}
+#endif
#endif /* __iwl_leds_h__ */
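The #else branch keeps every iwl_leds_*() call site unconditional: with CONFIG_IWLWIFI_LEDS off, the empty inlines compile away and no caller needs an #ifdef. The same idiom in generic form (all names hypothetical):

#ifdef CONFIG_MY_FEATURE
void my_feature_init(struct my_dev *dev);
#else
static inline void my_feature_init(struct my_dev *dev)
{
	/* intentionally empty; callers stay #ifdef-free */
}
#endif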
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 576f7ee38ca5..2191621d69c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -81,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
sizeof(tx_power_cmd), &tx_power_cmd);
}
@@ -141,7 +141,6 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
struct iwl_host_cmd cmd = {
.id = REPLY_TXFIFO_FLUSH,
.len = { sizeof(struct iwl_txfifo_flush_cmd), },
- .flags = CMD_SYNC,
.data = { &flush_cmd, },
};
@@ -180,7 +179,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans);
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
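iwl_trans_wait_tx_queue_empty() now takes a second argument; the all-ones value passed here suggests a bitmap of TX queues to drain. Under that assumption:

iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);	/* all queues */
iwl_trans_wait_tx_queue_empty(priv->trans, BIT(3));	/* only queue 3 */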
@@ -333,12 +332,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
memcpy(&bt_cmd_v2.basic, &basic,
sizeof(basic));
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
+ 0, sizeof(bt_cmd_v2), &bt_cmd_v2);
} else {
memcpy(&bt_cmd_v1.basic, &basic,
sizeof(basic));
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
+ 0, sizeof(bt_cmd_v1), &bt_cmd_v1);
}
if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -1044,7 +1043,6 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
struct iwl_host_cmd cmd = {
.id = REPLY_WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
};
int i, err;
@@ -1201,7 +1199,6 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
if (key_data.use_rsc_tsc) {
struct iwl_host_cmd rsc_tsc_cmd = {
.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
- .flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.len[0] = sizeof(*key_data.rsc_tsc),
@@ -1215,7 +1212,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
if (key_data.use_tkip) {
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_TKIP_PARAMS,
- CMD_SYNC, sizeof(tkip_cmd),
+ 0, sizeof(tkip_cmd),
&tkip_cmd);
if (ret)
goto out;
@@ -1231,20 +1228,20 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC, sizeof(kek_kck_cmd),
+ 0, sizeof(kek_kck_cmd),
&kek_kck_cmd);
if (ret)
goto out;
}
}
- ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
if (ret)
goto out;
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
- CMD_SYNC, sizeof(wakeup_filter_cmd),
+ 0, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
goto out;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index dd55c9cf7ba8..29af7b51e370 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans);
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 6a6df71af1d7..0b7f46f0b079 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -128,7 +128,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
struct iwl_tx_beacon_cmd *tx_beacon_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_TX_BEACON,
- .flags = CMD_SYNC,
};
struct ieee80211_tx_info *info;
u32 frame_size;
@@ -311,8 +310,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
- return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
- CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
@@ -622,7 +620,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
- CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
+ 0, sizeof(adv_cmd), &adv_cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
@@ -637,7 +635,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
- CMD_SYNC, sizeof(cmd), &cmd);
+ 0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
@@ -673,9 +671,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_dvm_send_cmd_pdu(priv,
- TX_ANT_CONFIGURATION_CMD,
- CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
sizeof(struct iwl_tx_ant_config_cmd),
&tx_ant_cmd);
} else {
@@ -703,7 +699,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
+ 0, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -987,7 +983,7 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
else
IWL_ERR(priv,
- "Cannot request restart before registrating with mac80211");
+ "Cannot request restart before registrating with mac80211\n");
} else {
WARN_ON(1);
}
@@ -1127,7 +1123,6 @@ static void iwl_option_config(struct iwl_priv *priv)
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_nvm_data *data = priv->nvm_data;
- char *debug_msg;
if (data->sku_cap_11n_enable &&
!priv->cfg->ht_params) {
@@ -1141,8 +1136,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return -EINVAL;
}
- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
- IWL_DEBUG_INFO(priv, debug_msg,
+ IWL_DEBUG_INFO(priv,
+ "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
data->sku_cap_11n_enable ? "" : "NOT", "enabled");
@@ -1350,7 +1345,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
iwl_set_hw_params(priv);
if (!(priv->nvm_data->sku_cap_ipan_enable)) {
- IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
+ IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
* if not PAN, then don't support P2P -- might be a uCode
@@ -2019,10 +2014,10 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
if (!test_bit(mq, &priv->transport_queue_stop)) {
- IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
+ IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
ieee80211_wake_queue(priv->hw, mq);
} else {
- IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
+ IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
}
}
@@ -2053,6 +2048,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}
+static void iwl_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
+}
+
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2071,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
+ .napi_add = iwl_napi_add,
};
/*****************************************************************************
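With .napi_add wired into iwl_dvm_ops, the transport can hand its NAPI context up to the op_mode (see the iwl-op-mode.h hunk near the end of this diff). A plausible transport-side dispatcher; only the ops member is in this patch, the wrapper and the ops field name are my sketch:

static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
					struct napi_struct *napi,
					struct net_device *napi_dev,
					int (*poll)(struct napi_struct *, int),
					int weight)
{
	if (op_mode->ops->napi_add)
		op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
}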
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index b4e61417013a..f2c1439566b5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -278,7 +278,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -361,7 +361,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
} else
- IWL_ERR(priv, "set power fail, ret = %d", ret);
+ IWL_ERR(priv, "set power fail, ret = %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index aa773a2da4ab..32b78a66536d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1453,7 +1453,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -1628,7 +1628,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -1799,7 +1799,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -1969,7 +1969,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -2709,7 +2709,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+ iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
}
static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cd8377346aff..debec963c610 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -786,7 +786,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_ni(priv->hw, skb);
+ ieee80211_rx(priv->hw, skb);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 503a81e58185..ed50de6362ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -104,7 +104,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
- CMD_SYNC, sizeof(*send), send);
+ 0, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -134,7 +134,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
- CMD_SYNC, sizeof(*send), send);
+ 0, sizeof(*send), send);
send->filter_flags = old_filter;
send->dev_type = old_dev_type;
@@ -160,7 +160,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
sizeof(*send), send);
send->filter_flags = old_filter;
@@ -189,7 +189,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -353,7 +353,7 @@ static int iwl_send_rxon_timing(struct iwl_priv *priv,
le16_to_cpu(ctx->timing.atim_window));
return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
+ 0, sizeof(ctx->timing), &ctx->timing);
}
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
@@ -495,7 +495,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -610,7 +610,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -823,7 +823,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
- IWL_WARN(priv, "CCK and auto detect");
+ IWL_WARN(priv, "CCK and auto detect\n");
errors |= BIT(8);
}
@@ -1395,7 +1395,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
priv->phy_calib_chain_noise_reset_cmd);
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_PHY_CALIBRATION_CMD,
- CMD_SYNC, sizeof(cmd), &cmd);
+ 0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index be98b913ed58..43bef901e8f9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -59,7 +59,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
int ret;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
};
__le32 *status;
@@ -639,7 +639,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = { sizeof(struct iwl_scan_cmd), },
- .flags = CMD_SYNC,
};
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 9cdd91cdf661..6ec86adbe4a1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -39,7 +39,7 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
lockdep_assert_held(&priv->sta_lock);
if (sta_id >= IWLAGN_STATION_COUNT) {
- IWL_ERR(priv, "invalid sta_id %u", sta_id);
+ IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
return -EINVAL;
}
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
@@ -165,7 +165,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
iwl_free_resp(&cmd);
if (cmd.handler_status)
- IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
+ IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
cmd.handler_status);
return cmd.handler_status;
@@ -261,7 +261,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
cmd.station_flags = flags;
cmd.sta.sta_id = sta_id;
- return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &cmd, 0);
}
static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
@@ -413,7 +413,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
spin_unlock_bh(&priv->sta_lock);
/* Add station to device's station table */
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ ret = iwl_send_add_sta(priv, &sta_cmd, 0);
if (ret) {
spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -456,7 +456,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
.len = { sizeof(struct iwl_rem_sta_cmd), },
- .flags = CMD_SYNC,
.data = { &rm_sta_cmd, },
};
@@ -740,7 +739,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
send_lq = true;
}
spin_unlock_bh(&priv->sta_lock);
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ ret = iwl_send_add_sta(priv, &sta_cmd, 0);
if (ret) {
spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -756,8 +755,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* current LQ command
*/
if (send_lq)
- iwl_send_lq_cmd(priv, ctx, &lq,
- CMD_SYNC, true);
+ iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
spin_lock_bh(&priv->sta_lock);
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
}
@@ -968,7 +966,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
return -ENOMEM;
}
- ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+ ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
@@ -999,7 +997,6 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
struct iwl_host_cmd cmd = {
.id = ctx->wep_key_cmd,
.data = { wep_cmd, },
- .flags = CMD_SYNC,
};
might_sleep();
@@ -1248,7 +1245,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
int iwl_set_dynamic_key(struct iwl_priv *priv,
@@ -1284,13 +1281,13 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
- seq.tkip.iv32, p1k, CMD_SYNC);
+ seq.tkip.iv32, p1k, 0);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
- 0, NULL, CMD_SYNC);
+ 0, NULL, 0);
break;
default:
IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
@@ -1409,7 +1406,7 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_bh(&priv->sta_lock);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1433,7 +1430,7 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_bh(&priv->sta_lock);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1458,7 +1455,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_bh(&priv->sta_lock);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index 058c5892c427..acb981a0a0aa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -236,7 +236,7 @@ static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
/* make request to retrieve statistics information */
- iwl_send_statistics_request(priv, CMD_SYNC, false);
+ iwl_send_statistics_request(priv, 0, false);
/* Reschedule the ct_kill wait timer */
mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 398dd096674c..3255a1723d17 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -402,10 +402,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
/* aggregation is on for this <sta,tid> */
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
tid_data->agg.state != IWL_AGG_ON) {
- IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
- " Tx flags = 0x%08x, agg.state = %d",
+ IWL_ERR(priv,
+ "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
info->flags, tid_data->agg.state);
- IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+ IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
sta_id, tid,
IEEE80211_SEQ_TO_SN(tid_data->seq_number));
goto drop_unlock_sta;
@@ -416,7 +416,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
*/
if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
tid_data->agg.state != IWL_AGG_OFF,
- "Tx while agg.state = %d", tid_data->agg.state))
+ "Tx while agg.state = %d\n", tid_data->agg.state))
goto drop_unlock_sta;
seq_number = tid_data->seq_number;
@@ -778,8 +778,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
/* There are no packets for this RA / TID in the HW any more */
if (tid_data->agg.ssn == tid_data->next_reclaimed) {
IWL_DEBUG_TX_QUEUES(priv,
- "Can continue DELBA flow ssn = next_recl ="
- " %d", tid_data->next_reclaimed);
+ "Can continue DELBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
iwl_trans_txq_disable(priv->trans,
tid_data->agg.txq_id);
iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
@@ -791,8 +791,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
/* There are no packets for this RA / TID in the HW any more */
if (tid_data->agg.ssn == tid_data->next_reclaimed) {
IWL_DEBUG_TX_QUEUES(priv,
- "Can continue ADDBA flow ssn = next_recl ="
- " %d", tid_data->next_reclaimed);
+ "Can continue ADDBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
tid_data->agg.state = IWL_AGG_STARTING;
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
}
@@ -1216,8 +1216,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
ctx->vif->type == NL80211_IFTYPE_STATION) {
/* block and stop all queues */
priv->passive_no_rx = true;
- IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
- "passive channel");
+ IWL_DEBUG_TX_QUEUES(priv,
+ "stop all queues: passive channel\n");
ieee80211_stop_queues(priv->hw);
IWL_DEBUG_TX_REPLY(priv,
@@ -1271,7 +1271,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
- ieee80211_tx_status_ni(priv->hw, skb);
+ ieee80211_tx_status(priv->hw, skb);
}
return 0;
@@ -1411,7 +1411,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status_ni(priv->hw, skb);
+ ieee80211_tx_status(priv->hw, skb);
}
return 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index cf03ef5619d9..d5cee1530597 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -172,7 +172,7 @@ static int iwl_send_wimax_coex(struct iwl_priv *priv)
memset(&coex_cmd, 0, sizeof(coex_cmd));
return iwl_dvm_send_cmd_pdu(priv,
- COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
+ COEX_PRIORITY_TABLE_CMD, 0,
sizeof(coex_cmd), &coex_cmd);
}
@@ -205,7 +205,7 @@ void iwl_send_prio_tbl(struct iwl_priv *priv)
memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
sizeof(iwl_bt_prio_tbl));
if (iwl_dvm_send_cmd_pdu(priv,
- REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
+ REPLY_BT_COEX_PRIO_TABLE, 0,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
@@ -218,7 +218,7 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
env_cmd.action = action;
env_cmd.type = type;
ret = iwl_dvm_send_cmd_pdu(priv,
- REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
+ REPLY_BT_COEX_PROT_ENV, 0,
sizeof(env_cmd), &env_cmd);
if (ret)
IWL_ERR(priv, "failed to send BT env command\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 854ba84ccb73..c3817fae16c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl1000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 3e63323637f3..21e5d0843a62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl2000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6674f2c4541c..332bbede39e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8048de90233f..8f2c3c8c6b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 4c2d4ef28b22..48730064da73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,12 +71,12 @@
#define IWL3160_UCODE_API_MAX 9
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 8
-#define IWL3160_UCODE_API_OK 8
+#define IWL7260_UCODE_API_OK 9
+#define IWL3160_UCODE_API_OK 9
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 7
-#define IWL3160_UCODE_API_MIN 7
+#define IWL7260_UCODE_API_MIN 8
+#define IWL3160_UCODE_API_MIN 8
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -98,7 +98,7 @@
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
static const struct iwl_base_params iwl7000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.shadow_ram_support = true,
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .apmg_wake_up_wa = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index f5bd82b88592..51c41531d81d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -83,9 +83,10 @@
#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
+#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin"
static const struct iwl_base_params iwl8000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.shadow_ram_support = true,
@@ -118,6 +119,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
};
const struct iwl_cfg iwl8260_n_cfg = {
@@ -127,6 +129,7 @@ const struct iwl_cfg iwl8260_n_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7f37fb86837b..04a483d38659 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,9 +102,7 @@
/* EEPROM */
#define IWLAGN_EEPROM_IMG_SIZE 2048
-/* OTP */
-/* lower blocks contain EEPROM image and calibration data */
-#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
+
/* high blocks contain PAPD data */
#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f17dc3f2c8a..b7047905f41a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ * is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool pcie_l1_allowed;
+ const bool apmg_wake_up_wa;
+ const bool scd_chain_ext_wa;
};
/*
@@ -188,6 +193,11 @@ struct iwl_ht_params {
#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
#define EEPROM_REGULATORY_BAND_NO_HT40 0
+/* lower blocks contain EEPROM image and calibration data */
+#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
+
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
bool enhanced_txpower;
@@ -264,6 +274,8 @@ struct iwl_cfg {
u8 nvm_hw_section_num;
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
+ bool no_power_up_nic_in_init;
+ const char *default_nvm_file;
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 8a44f594528d..09feff4fa226 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,8 +61,6 @@
*
*****************************************************************************/
-#define DEBUG
-
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/export.h>
@@ -128,8 +126,8 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
- dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
- function, &vaf);
+ dev_printk(KERN_DEBUG, dev, "%c %s %pV",
+ in_interrupt() ? 'I' : 'U', function, &vaf);
#endif
trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
va_end(args);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c8cbdbe15924..295083510e72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,12 +47,32 @@ void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
+/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
+#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
+
/* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
-#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
-#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
+#define IWL_ERR_DEV(d, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_err((d), false, false, f, ## a); \
+ } while (0)
+#define IWL_ERR(m, f, a...) \
+ IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_warn((m)->dev, f, ## a); \
+ } while (0)
+#define IWL_INFO(m, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_info((m)->dev, f, ## a); \
+ } while (0)
+#define IWL_CRIT(m, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_crit((m)->dev, f, ## a); \
+ } while (0)
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
@@ -72,12 +92,17 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
+#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \
+ do { \
+ CHECK_FOR_NEWLINE(fmt); \
+ __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \
+ } while (0)
#define IWL_DEBUG(m, level, fmt, args...) \
- __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+ __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
- __iwl_dbg((dev), level, false, __func__, fmt, ##args)
+ __IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
- __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
+ __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
#ifdef CONFIG_IWLWIFI_DEBUG
#define iwl_print_hex_dump(m, level, p, len) \
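CHECK_FOR_NEWLINE() works because sizeof on a string literal includes the terminating NUL, so index sizeof(f) - 2 is the last visible character, and BUILD_BUG_ON rejects any format that does not end in '\n' at compile time. A user-space sketch of the same index arithmetic; note it only works on literals/arrays, since on a char * it would measure the pointer:

#include <assert.h>

#define ENDS_IN_NL(s)	((s)[sizeof(s) - 2] == '\n')

int main(void)
{
	/* sizeof("ok\n") == 4: 'o', 'k', '\n', '\0'; index 2 is '\n' */
	assert(ENDS_IN_NL("ok\n"));
	assert(!ENDS_IN_NL("no newline"));
	return 0;
}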
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 0a3e841b44a9..f2a5c12269a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1243,6 +1243,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.wd_disable = true,
+ .uapsd_disable = false,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1356,6 +1357,10 @@ MODULE_PARM_DESC(wd_disable,
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
MODULE_PARM_DESC(nvm_file, "NVM file name");
+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
+
/*
* set bt_coex_active to true, uCode will do kill/defer
* every time the priority line is asserted (BT is sending signals on the
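The new knob is read-only at runtime (S_IRUGO) and defaults to N; booting with iwlwifi.uapsd_disable=1 turns it on. How driver code would consult it, sketched with a hypothetical call site:

if (iwlwifi_mod_params.uapsd_disable)
	return false;	/* don't advertise U-APSD support */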
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 58c8941c0d95..2953ffceda38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -71,10 +71,15 @@
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_SRAM:
* @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ * &struct iwl_fw_error_dump_txcmd packets
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
IWL_FW_ERROR_DUMP_REG = 1,
+ IWL_FW_ERROR_DUMP_RXF = 2,
+ IWL_FW_ERROR_DUMP_TXCMD = 3,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -89,7 +94,7 @@ struct iwl_fw_error_dump_data {
__le32 type;
__le32 len;
__u8 data[];
-} __packed __aligned(4);
+} __packed;
/**
* struct iwl_fw_error_dump_file - the layout of the header of the file
@@ -101,6 +106,29 @@ struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
u8 data[0];
-} __packed __aligned(4);
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+ __le32 cmdlen;
+ __le32 caplen;
+ u8 data[];
+} __packed;
+
+/**
+ * iwl_mvm_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_mvm_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+ return (void *)(data->data + le32_to_cpu(data->len));
+}
#endif /* __fw_error_dump_h__ */
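iwl_mvm_fw_error_next_data() makes the dump file walkable as a flat sequence of (type, len, data[]) records. A sketch of a consumer loop, where first_block, end and handle_block are hypothetical:

struct iwl_fw_error_dump_data *d = first_block;

while ((u8 *)d + sizeof(*d) <= end) {	/* bounds check is my addition */
	handle_block(le32_to_cpu(d->type), d->data, le32_to_cpu(d->len));
	d = iwl_mvm_fw_error_next_data(d);
}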
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d14f19339d61..0aa7c0085c9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,29 +74,24 @@
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- * connection when going back to D0
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- * containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a standalone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -104,22 +99,16 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
- IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
- IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
- IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
- IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
@@ -128,9 +117,11 @@ enum iwl_ucode_tlv_flag {
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
+ IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
};
/**
@@ -183,6 +174,7 @@ enum iwl_ucode_sec {
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -205,6 +197,11 @@ struct fw_img {
bool is_dual_cpus;
};
+struct iwl_sf_region {
+ u32 addr;
+ u32 size;
+};
+
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 44cc3cf45762..5eef4ae7333b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,7 @@
#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
+#include "iwl-prph.h"
#include "iwl-fh.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
@@ -183,6 +184,23 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
+void iwl_force_nmi(struct iwl_trans *trans)
+{
+ /*
+ * In HW previous to the 8000 HW family, and in the 8000 HW family
+ * itself when the revision step==0, the DEVICE_SET_NMI_REG is used
+ * to force an NMI. Otherwise, a different register -
+ * DEVICE_SET_NMI_8000B_REG - is used.
+ */
+ if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
+ ((trans->hw_rev & 0xc) == 0x0))
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
+ else
+ iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
+ DEVICE_SET_NMI_8000B_VAL);
+}
+IWL_EXPORT_SYMBOL(iwl_force_nmi);
+
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
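On my reading of the mask, 0xc isolates hw_rev bits [3:2] as the silicon step, so step 0 (the A-step) keeps the legacy register even on the 8000 family:

u8 step = (trans->hw_rev & 0xc) >> 2;	/* 0 selects DEVICE_SET_NMI_REG */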
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 665ddd9dbbc4..705d12c079e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -80,6 +80,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_force_nmi(struct iwl_trans *trans);
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d994317db85b..d051857729ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -119,6 +119,7 @@ struct iwl_mod_params {
#endif
int ant_coupling;
char *nvm_file;
+ bool uapsd_disable;
};
#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6be30c698506..85eee79c495c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -62,6 +62,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
@@ -127,19 +128,20 @@ static const u8 iwl_nvm_channels[] = {
static const u8 iwl_nvm_channels_family_8000[] = {
/* 2.4 GHz */
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165, 169, 173, 177, 181
};
-#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
-#define NUM_2GHZ_CHANNELS 14
-#define FIRST_2GHZ_HT_MINUS 5
-#define LAST_2GHZ_HT_PLUS 9
-#define LAST_5GHZ_HT 161
+#define NUM_2GHZ_CHANNELS 14
+#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
+#define FIRST_2GHZ_HT_MINUS 5
+#define LAST_2GHZ_HT_PLUS 9
+#define LAST_5GHZ_HT 161
#define DEFAULT_MAX_TX_POWER 16
@@ -202,21 +204,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
- int num_of_ch;
+ int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
nvm_chan = &iwl_nvm_channels_family_8000[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
ch_flags &= ~NVM_CHANNEL_VALID;
@@ -225,7 +229,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
nvm_chan[ch_idx],
ch_flags,
- (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ (ch_idx >= num_2ghz_channels) ?
"5.2" : "2.4");
continue;
}
@@ -234,7 +238,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ channel->band = (ch_idx < num_2ghz_channels) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
@@ -242,7 +246,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* TODO: Need to be dependent to the NVM */
channel->flags = IEEE80211_CHAN_NO_HT40;
- if (ch_idx < NUM_2GHZ_CHANNELS &&
+ if (ch_idx < num_2ghz_channels &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +254,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
- if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ if ((ch_idx - num_2ghz_channels) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
@@ -447,13 +451,7 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_sec)
{
- u8 hw_addr[ETH_ALEN];
-
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
- memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
- else
- memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
- ETH_ALEN);
+ const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
/* The byte order is little endian 16 bit, meaning 214365 */
data->hw_addr[0] = hw_addr[1];
@@ -464,6 +462,41 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
data->hw_addr[5] = hw_addr[4];
}
+static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 *mac_override,
+ const __le16 *nvm_hw)
+{
+ const u8 *hw_addr;
+
+ if (mac_override) {
+ hw_addr = (const u8 *)(mac_override +
+ MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+
+ if (is_valid_ether_addr(hw_addr))
+ return;
+ }
+
+ /* take the MAC address from the OTP */
+ hw_addr = (const u8 *)(nvm_hw + HW_ADDR0_FAMILY_8000);
+ data->hw_addr[0] = hw_addr[3];
+ data->hw_addr[1] = hw_addr[2];
+ data->hw_addr[2] = hw_addr[1];
+ data->hw_addr[3] = hw_addr[0];
+
+ hw_addr = (const u8 *)(nvm_hw + HW_ADDR1_FAMILY_8000);
+ data->hw_addr[4] = hw_addr[1];
+ data->hw_addr[5] = hw_addr[0];
+}
+
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -523,7 +556,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
rx_chains);
} else {
/* MAC address in family 8000 */
- iwl_set_hw_address(cfg, data, mac_override);
+ iwl_set_hw_address_family_8000(cfg, data, mac_override, nvm_hw);
iwl_init_sbands(dev, cfg, data, regulatory,
sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
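The 2-1-4-3-6-5 swap in both address paths above is just the natural layout of a MAC address stored as three little-endian 16-bit words: adjacent bytes are swapped within each word. A sketch of the unpacking, using a hypothetical helper name:

	/* hypothetical helper: unpack a MAC stored as 3 LE u16 words */
	static void copy_le16_mac(u8 *dst, const u8 *src)
	{
		int i;

		for (i = 0; i < 6; i += 2) {
			dst[i] = src[i + 1];	/* swap within each word */
			dst[i + 1] = src[i];
		}
	}
	/* stored digit order 214365 reads back as address 123456 */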
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ea29504ac617..99785c892f96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -63,6 +63,7 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
+#include <linux/netdevice.h>
#include <linux/debugfs.h>
struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- * HCMD this Rx responds to.
- * This callback may sleep, it is called from a threaded IRQ handler.
+ * HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ * but the higher layers need to know about it (in particular mac80211 to
+ * be able to call the right NAPI RX functions); this function is needed
+ * to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+ void (*napi_add)(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
- might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}
+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ if (!op_mode->ops->napi_add)
+ return;
+ op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
#endif /* __iwl_op_mode_h__ */
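The napi_add hook is deliberately optional: the inline wrapper above returns silently when an op_mode leaves it NULL. A sketch of a hypothetical op_mode providing the callback, which simply hands the transport-owned NAPI context to the stack:

	/* sketch, assuming a hypothetical op_mode implementation */
	static void my_op_mode_napi_add(struct iwl_op_mode *op_mode,
					struct napi_struct *napi,
					struct net_device *napi_dev,
					int (*poll)(struct napi_struct *, int),
					int weight)
	{
		/* register the transport's NAPI context with the stack */
		netif_napi_add(napi_dev, napi, poll, weight);
	}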
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index b761ac4822a3..d4fb5cad07ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -345,7 +345,6 @@ static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
struct iwl_phy_db_cmd phy_db_cmd;
struct iwl_host_cmd cmd = {
.id = PHY_DB_CMD,
- .flags = CMD_SYNC,
};
IWL_DEBUG_INFO(phy_db->trans,
@@ -393,13 +392,13 @@ static int iwl_phy_db_send_all_channel_groups(
entry->data);
if (err) {
IWL_ERR(phy_db->trans,
- "Can't SEND phy_db section %d (%d), err %d",
+ "Can't SEND phy_db section %d (%d), err %d\n",
type, i, err);
return err;
}
IWL_DEBUG_INFO(phy_db->trans,
- "Sent PHY_DB HCMD, type = %d num = %d",
+ "Sent PHY_DB HCMD, type = %d num = %d\n",
type, i);
}
@@ -451,7 +450,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
IWL_NUM_PAPD_CH_GROUPS);
if (err) {
IWL_ERR(phy_db->trans,
- "Cannot send channel specific PAPD groups");
+ "Cannot send channel specific PAPD groups\n");
return err;
}
@@ -461,7 +460,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
IWL_NUM_TXP_CH_GROUPS);
if (err) {
IWL_ERR(phy_db->trans,
- "Cannot send channel specific TX power groups");
+ "Cannot send channel specific TX power groups\n");
return err;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5f657c501406..4997e27672b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -105,6 +105,9 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
+#define DEVICE_SET_NMI_VAL 0x1
+#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
+#define DEVICE_SET_NMI_8000B_VAL 0x1000000
/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
#define SHR_BASE 0x00a10000
@@ -348,4 +351,12 @@ enum secure_load_status_reg {
#define LMPM_SECURE_TIME_OUT (100)
+/* Rx FIFO */
+#define RXF_SIZE_ADDR (0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS (7)
+#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
+
#endif /* __iwl_prph_h__ */
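The new Rx FIFO definitions follow the usual mask/position idiom; a minimal sketch of extracting the byte count from the size register:

	/* sketch: read the Rx FIFO byte count using the masks above */
	static u32 rxf_byte_count(struct iwl_trans *trans)
	{
		u32 val = iwl_trans_read_prph(trans, RXF_SIZE_ADDR);

		return (val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
	}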
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8cdb0dd618a6..34d49e171fb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -189,10 +189,9 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
/**
* enum CMD_MODE - how to send the host commands ?
*
- * @CMD_SYNC: The caller will be stalled until the fw responds to the command
* @CMD_ASYNC: Return right away and don't wait for the response
- * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- * response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
+ * the response. The caller needs to call iwl_free_resp when done.
* @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
* command queue, but after other high priority commands. valid only
* with CMD_ASYNC.
@@ -202,7 +201,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* (i.e. mark it as non-idle).
*/
enum CMD_MODE {
- CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
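Since CMD_SYNC had the value 0, removing it changes no behaviour: a synchronous send is now expressed by simply leaving .flags at zero, which is why callers throughout this series replace CMD_SYNC with 0. A minimal sketch:

	/* sketch: synchronous is now the default (flags == 0) */
	struct iwl_host_cmd sync_cmd = {
		.id = PHY_DB_CMD,	/* blocks until the fw responds */
	};

	struct iwl_host_cmd async_cmd = {
		.id = PHY_DB_CMD,
		.flags = CMD_ASYNC,	/* return without waiting */
	};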
@@ -427,7 +425,7 @@ struct iwl_trans;
* @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
* If RFkill is asserted in the middle of a SYNC host command, it must
* return -ERFKILL straight away.
- * May sleep only if CMD_SYNC is set
+ * May sleep only if CMD_ASYNC is not set
* @tx: send an skb
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
@@ -437,8 +435,7 @@ struct iwl_trans;
* this one. The op_mode must not configure the HCMD queue. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
- * @wait_tx_queue_empty: wait until all tx queues are empty
- * May sleep
+ * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @write8: write a u8 to a register at offset ofs from the BAR
@@ -464,6 +461,11 @@ struct iwl_trans;
* @unref: release a reference previously taken with @ref. Note that
* initially the reference count is 1, making an initial @unref
* necessary to allow low power states.
+ * @dump_data: fill a data dump with debug data, maybe containing last
+ * TX'ed commands and similar. When called with a NULL buffer and
+ * zero buffer length, provide only the (estimated) required buffer
+ * length. Return the used buffer length.
+ * Note that the transport must fill in the proper file headers.
*/
struct iwl_trans_ops {
@@ -471,6 +473,8 @@ struct iwl_trans_ops {
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
+ int (*update_sf)(struct iwl_trans *trans,
+ struct iwl_sf_region *st_fwrd_space);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
@@ -490,7 +494,7 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
- int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+ int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -512,6 +516,10 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ u32 (*dump_data)(struct iwl_trans *trans, void *buf, u32 buflen);
+#endif
};
/**
@@ -630,6 +638,17 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
+static inline int iwl_trans_update_sf(struct iwl_trans *trans,
+ struct iwl_sf_region *st_fwrd_space)
+{
+ might_sleep();
+
+ if (trans->ops->update_sf)
+ return trans->ops->update_sf(trans, st_fwrd_space);
+
+ return 0;
+}
+
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
@@ -665,6 +684,16 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline u32 iwl_trans_dump_data(struct iwl_trans *trans,
+ void *buf, u32 buflen)
+{
+ if (!trans->ops->dump_data)
+ return 0;
+ return trans->ops->dump_data(trans, buf, buflen);
+}
+#endif
+
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
@@ -678,7 +707,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
return -EIO;
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
@@ -720,7 +749,7 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -EIO;
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return trans->ops->tx(trans, skb, dev_cmd, queue);
}
@@ -729,7 +758,7 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
int ssn, struct sk_buff_head *skbs)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
trans->ops->reclaim(trans, queue, ssn, skbs);
}
@@ -746,7 +775,7 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
might_sleep();
if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
@@ -759,12 +788,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
+ u32 txq_bm)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return trans->ops->wait_tx_queue_empty(trans);
+ return trans->ops->wait_tx_queue_empty(trans, txq_bm);
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ccdd3b7c4cce..c30d7f64ec1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -3,8 +3,9 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o coex.o
-iwlmvm-y += led.o tt.o offloading.o
+iwlmvm-y += tt.o offloading.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
+iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 0489314425cb..c8c3b38228f0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -104,12 +104,9 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
}
@@ -127,10 +124,10 @@ const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
};
static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
- cpu_to_le32(0xf0f0f0f0),
- cpu_to_le32(0xc0c0c0c0),
- cpu_to_le32(0xfcfcfcfc),
- cpu_to_le32(0xff00ff00),
+ cpu_to_le32(0xf0f0f0f0), /* 50% */
+ cpu_to_le32(0xc0c0c0c0), /* 25% */
+ cpu_to_le32(0xfcfcfcfc), /* 75% */
+ cpu_to_le32(0xfefefefe), /* 87.5% */
};
static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
@@ -303,8 +300,8 @@ static const __le64 iwl_ci_mask[][3] = {
};
static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
- cpu_to_le32(0x22002200),
- cpu_to_le32(0x33113311),
+ cpu_to_le32(0x28412201),
+ cpu_to_le32(0x11118451),
};
struct corunning_block_luts {
@@ -568,13 +565,13 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
int ret;
u32 flags;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return 0;
+ ret = iwl_send_bt_prio_tbl(mvm);
+ if (ret)
+ return ret;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@@ -582,10 +579,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
- bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
- bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
- bt_cmd->bt4_tx_rx_max_freq0 = 15,
+ bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ bt_cmd->bt4_tx_rx_max_freq0 = 15;
+ bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+ bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
@@ -663,7 +662,6 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
.data[0] = &bt_cmd,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
int ret = 0;
@@ -717,7 +715,8 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+ bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
@@ -735,8 +734,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
return 0;
/* nothing to do */
- if (mvmsta->bt_reduced_txpower_dbg ||
- mvmsta->bt_reduced_txpower == enable)
+ if (mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -803,23 +801,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ /* Count BSS vifs */
+ data->num_bss_ifaces++;
/* default smps_mode for BSS / P2P client is AUTOMATIC */
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- data->num_bss_ifaces++;
-
- /*
- * Count unassoc BSSes, relax SMSP constraints
- * and disable reduced Tx Power
- */
- if (!vif->bss_conf.assoc) {
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- if (iwl_mvm_bt_coex_reduced_txp(mvm,
- mvmvif->ap_sta_id,
- false))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
- return;
- }
break;
case NL80211_IFTYPE_AP:
/* default smps_mode for AP / GO is OFF */
@@ -845,8 +830,12 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* ... relax constraints and disable rssi events */
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
- if (vif->type == NL80211_IFTYPE_STATION)
+ data->reduced_tx_power = false;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ false);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ }
return;
}
@@ -857,6 +846,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = vif->type == NL80211_IFTYPE_AP ?
IEEE80211_SMPS_OFF :
IEEE80211_SMPS_DYNAMIC;
+
+ /* relax SMPS constraints for next association */
+ if (!vif->bss_conf.assoc)
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -903,22 +897,18 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
- /* don't reduce the Tx power if in loose scheme */
+ /*
+ * don't reduce the Tx power if one of these is true:
+ * we are in LOOSE
+ * single shared antenna product
+ * BT is not active
+ * we are not associated
+ */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant) {
- data->reduced_tx_power = false;
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
- return;
- }
-
- /* reduced Txpower only if BT is on, so ...*/
- if (!data->notif->bt_status) {
- /* ... cancel reduced Tx power ... */
- if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+ !data->notif->bt_status) {
data->reduced_tx_power = false;
-
- /* ... and there is no need to get reports on RSSI any more. */
+ iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
@@ -1022,9 +1012,9 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
/* Don't spam the fw with the same command over and over */
if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
- if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
sizeof(cmd), &cmd))
- IWL_ERR(mvm, "Failed to send BT_CI cmd");
+ IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
}
@@ -1039,7 +1029,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
-/* upon association, the fw will send in BT Coex notification */
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
@@ -1215,6 +1204,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
+{
+ u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+ if (band != IEEE80211_BAND_2GHZ)
+ return false;
+
+ return bt_activity >= BT_LOW_TRAFFIC;
+}
+
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
@@ -1249,9 +1249,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return;
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
@@ -1270,7 +1267,6 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
if (!IWL_MVM_BT_COEX_CORUNNING)
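The percentages annotated on iwl_bt_prio_boost above correspond to the fraction of set bits in each 32-bit slot mask, assuming each set bit marks one of 32 time slices in which the boost applies. A sketch of that arithmetic:

	#include <linux/bitops.h>

	/* sketch: duty cycle implied by a 32-slot boost mask */
	static unsigned int boost_duty_percent(u32 mask)
	{
		return hweight32(mask) * 100 / 32;
	}
	/* 0xf0f0f0f0 -> 50%, 0xc0c0c0c0 -> 25%, 0xfcfcfcfc -> 75%,
	 * 0xfefefefe -> 87.5% (integer division reports 87)
	 */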
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index e56f5a0edf85..645b3cfc29a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -193,8 +193,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
wkc.wep_key.key_offset = data->wep_key_idx;
}
- ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
- sizeof(wkc), &wkc);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
data->error = ret != 0;
mvm->ptk_ivlen = key->iv_len;
@@ -341,7 +340,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
};
int i, err;
@@ -518,7 +516,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
.id = REMOTE_WAKE_CONFIG_CMD,
.len = { sizeof(*cfg), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
int ret;
@@ -666,10 +663,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (WARN_ON(!vif->bss_conf.assoc))
return -EINVAL;
- /* hack */
- vif->bss_conf.assoc = false;
+
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
- vif->bss_conf.assoc = true;
if (ret)
return ret;
@@ -705,7 +700,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
if (ret)
return ret;
@@ -719,7 +714,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 1; i < MAX_BINDINGS; i++)
quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
sizeof(quota_cmd), &quota_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
@@ -739,15 +734,13 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
};
struct iwl_host_cmd cmd = {
.id = NON_QOS_TX_COUNTER_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
};
int err;
u32 size;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
- cmd.data[0] = &query_cmd;
- cmd.len[0] = sizeof(query_cmd);
- }
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
err = iwl_mvm_send_cmd(mvm, &cmd);
if (err)
@@ -758,10 +751,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
err = -EINVAL;
} else {
err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
- /* new API returns next, not last-used seqno */
- if (mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- err = (u16) (err - 0x10);
+ /* firmware returns next, not last-used seqno */
+ err = (u16) (err - 0x10);
}
iwl_free_resp(&cmd);
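The "- 0x10" above steps the raw value back by exactly one sequence number: in the 802.11 Sequence Control field, bits 0-3 hold the fragment number and bits 4-15 the sequence number, so one sequence step is 0x10. A sketch:

	/* sketch: last-used seqno from the fw's "next" Sequence Control */
	static u16 last_used_seq(u16 next_seq_ctl)
	{
		/* seqno occupies bits 4-15; frag number bits 0-3 */
		return (u16)(next_seq_ctl - 0x10);
	}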
@@ -785,11 +776,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->seqno_valid = false;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
- return;
-
- if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
sizeof(query_cmd), &query_cmd))
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
@@ -804,7 +791,7 @@ iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
cmd_len = sizeof(*cmd);
- return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
cmd_len, cmd);
}
@@ -833,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
struct iwl_host_cmd d3_cfg_cmd = {
.id = D3_CONFIG_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
@@ -983,7 +970,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (key_data.use_rsc_tsc) {
struct iwl_host_cmd rsc_tsc_cmd = {
.id = WOWLAN_TSC_RSC_PARAM,
- .flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.len[0] = sizeof(*key_data.rsc_tsc),
@@ -997,7 +983,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (key_data.use_tkip) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
- CMD_SYNC, sizeof(tkip_cmd),
+ 0, sizeof(tkip_cmd),
&tkip_cmd);
if (ret)
goto out;
@@ -1014,8 +1000,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
ret = iwl_mvm_send_cmd_pdu(mvm,
- WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC,
+ WOWLAN_KEK_KCK_MATERIAL, 0,
sizeof(kek_kck_cmd),
&kek_kck_cmd);
if (ret)
@@ -1031,7 +1016,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
+ ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
if (ret)
goto out;
@@ -1043,7 +1028,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
if (ret)
goto out;
@@ -1082,6 +1067,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_is_d0i3_supported(mvm)) {
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+
return __iwl_mvm_suspend(hw, wowlan, false);
}
@@ -1277,7 +1271,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
}
static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
- struct iwl_wowlan_status_v6 *status)
+ struct iwl_wowlan_status *status)
{
union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
@@ -1294,7 +1288,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
}
struct iwl_mvm_d3_gtk_iter_data {
- struct iwl_wowlan_status_v6 *status;
+ struct iwl_wowlan_status *status;
void *last_gtk;
u32 cipher;
bool find_phase, unhandled_cipher;
@@ -1370,7 +1364,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status_v6 *status)
+ struct iwl_wowlan_status *status)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1465,10 +1459,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
} err_info;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
};
struct iwl_wowlan_status_data status;
- struct iwl_wowlan_status_v6 *status_v6;
+ struct iwl_wowlan_status *fw_status;
int ret, len, status_size, i;
bool keep;
struct ieee80211_sta *ap_sta;
@@ -1491,7 +1485,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
}
/* only for tracing for now */
- ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
@@ -1505,10 +1499,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- status_size = sizeof(struct iwl_wowlan_status_v6);
- else
- status_size = sizeof(struct iwl_wowlan_status_v4);
+ status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
@@ -1516,35 +1507,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
goto out_free_resp;
}
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
- status_v6 = (void *)cmd.resp_pkt->data;
-
- status.pattern_number = le16_to_cpu(status_v6->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(status_v6->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(status_v6->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(status_v6->wake_packet_bufsize);
- status.wake_packet = status_v6->wake_packet;
- } else {
- struct iwl_wowlan_status_v4 *status_v4;
- status_v6 = NULL;
- status_v4 = (void *)cmd.resp_pkt->data;
-
- status.pattern_number = le16_to_cpu(status_v4->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(status_v4->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(status_v4->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(status_v4->wake_packet_bufsize);
- status.wake_packet = status_v4->wake_packet;
- }
+ fw_status = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(fw_status->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(fw_status->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(fw_status->wake_packet_bufsize);
+ status.wake_packet = fw_status->wake_packet;
if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1545,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
- keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
iwl_free_resp(&cmd);
return keep;
@@ -1674,6 +1648,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (iwl_mvm_is_d0i3_supported(mvm)) {
+ bool exit_now;
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ if (exit_now)
+ _iwl_mvm_exit_d0i3(mvm);
+ return 0;
+ }
+
return __iwl_mvm_resume(mvm, false);
}
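For D0i3-capable devices the suspend/resume pair above forms a small handshake: suspend only sets D0I3_DEFER_WAKEUP, and resume clears it, exiting D0i3 at once if a wakeup arrived in between (D0I3_PENDING_WAKEUP). A condensed sketch of the assumed counterpart on the wakeup path:

	/* sketch of the assumed wakeup-side half of the handshake */
	static void d0i3_wakeup(struct iwl_mvm *mvm)
	{
		bool defer;

		mutex_lock(&mvm->d0i3_suspend_mutex);
		defer = test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
		if (defer)	/* let resume perform the exit later */
			__set_bit(D0I3_PENDING_WAKEUP,
				  &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);

		if (!defer)
			_iwl_mvm_exit_d0i3(mvm);
	}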
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9b59e1d7ae71..2e90ff795c13 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
dbgfs_pm->tx_data_timeout = val;
break;
- case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
- IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
- dbgfs_pm->disable_power_off = val;
- break;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18) &&
- !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
} else if (!strncmp("lprx=", buf, 5)) {
if (sscanf(buf + 5, "%d", &val) != 1)
return -EINVAL;
@@ -185,7 +175,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -272,10 +262,9 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d force %d\n",
+ "ap_sta_id %d - reduced Tx power %d\n",
ap_sta_id,
- mvm_sta->bt_reduced_txpower,
- mvm_sta->bt_reduced_txpower_dbg);
+ mvm_sta->bt_reduced_txpower);
}
}
@@ -293,41 +282,6 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct iwl_mvm_sta *mvmsta;
- bool reduced_tx_power;
- int ret;
-
- if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
- return -ENOTCONN;
-
- if (strtobool(buf, &reduced_tx_power) != 0)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
- if (IS_ERR_OR_NULL(mvmsta)) {
- mutex_unlock(&mvm->mutex);
- return -ENOTCONN;
- }
-
- mvmsta->bt_reduced_txpower_dbg = false;
- ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
- reduced_tx_power);
- if (!ret)
- mvmsta->bt_reduced_txpower_dbg = true;
-
- mutex_unlock(&mvm->mutex);
-
- return ret ? : count;
-}
-
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -462,9 +416,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -568,7 +522,6 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -592,8 +545,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
- iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
@@ -601,7 +553,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 1b52deea6081..29ca72695eaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -65,9 +65,8 @@
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
-#include "iwl-prph.h"
#include "debugfs.h"
-#include "fw-error-dump.h"
+#include "iwl-fw-error-dump.h"
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -136,9 +135,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
file->private_data = mvm->fw_error_dump;
mvm->fw_error_dump = NULL;
- kfree(mvm->fw_error_sram);
- mvm->fw_error_sram = NULL;
- mvm->fw_error_sram_len = 0;
ret = 0;
out:
@@ -684,7 +680,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mvm->restart_fw++;
/* take the return value to make compiler happy - it will fail anyway */
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
mutex_unlock(&mvm->mutex);
@@ -694,7 +690,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(mvm->trans);
return count;
}
@@ -841,7 +837,7 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
/* send updated bcast filtering configuration */
if (mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
mutex_unlock(&mvm->mutex);
@@ -913,7 +909,7 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
/* send updated bcast filtering configuration */
if (mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
mutex_unlock(&mvm->mutex);
@@ -1004,6 +1000,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
+ PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1108,9 +1105,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
- .open = iwl_dbgfs_fw_error_dump_open,
- .read = iwl_dbgfs_fw_error_dump_read,
- .release = iwl_dbgfs_fw_error_dump_release,
+ .open = iwl_dbgfs_fw_error_dump_open,
+ .read = iwl_dbgfs_fw_error_dump_read,
+ .release = iwl_dbgfs_fw_error_dump_release,
};
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1135,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
- MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
- S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+ S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
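The switch from a raw periphery write to iwl_force_nmi() pairs with the new 8000B NMI register definitions added to iwl-prph.h above; a plausible sketch of the helper, not necessarily the driver's exact code:

	/* sketch: pick the NMI register by device family */
	void iwl_force_nmi(struct iwl_trans *trans)
	{
		if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, DEVICE_SET_NMI_REG,
				       DEVICE_SET_NMI_VAL);
		else
			iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
				       DEVICE_SET_NMI_8000B_VAL);
	}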
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 21877e5966a8..5fe82c29c8ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
BT_COEX_TX_DIS_LUT,
BT_COEX_MAX_LUT,
-};
+ BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
* @flags: &enum iwl_bt_coex_flags
* @max_kill:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ * should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ * should be set by default
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
* @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
__le32 flags;
u8 max_kill;
u8 bt_reduced_tx_power;
- u8 reserved[2];
+ u8 override_primary_lut;
+ u8 override_secondary_lut;
u8 bt4_antenna_isolation;
u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
__le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+} __packed; /* BT_COEX_CMD_API_S_VER_5 */
/**
* struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
BT_ON_NO_CONNECTION = 1,
BT_LOW_TRAFFIC = 2,
BT_HIGH_TRAFFIC = 3,
-};
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
__le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
enum iwl_bt_coex_prio_table_event {
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 10fcc1a79ebd..13696fe419b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_status_v4 {
- __le64 replay_ctr;
- __le16 pattern_number;
- __le16 non_qos_seq_ctr;
- __le16 qos_seq_ctr[8];
- __le32 wakeup_reasons;
- __le32 rekey_status;
- __le32 num_of_gtk_rekeys;
- __le32 transmitted_ndps;
- __le32 received_beacons;
- __le32 wake_packet_length;
- __le32 wake_packet_bufsize;
- u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
-
struct iwl_wowlan_gtk_status {
u8 key_index;
u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed;
-struct iwl_wowlan_status_v6 {
+struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
__le64 replay_ctr;
__le16 pattern_number;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 39148b5bb332..8bb5b94bf963 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -334,7 +334,7 @@ enum {
*/
struct iwl_lq_cmd {
u8 sta_id;
- u8 reserved1;
+ u8 reduced_tpc;
u16 control;
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
u8 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d73a89ecd78a..6959fda3fe09 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -169,8 +169,12 @@ enum iwl_scan_type {
SCAN_TYPE_DISCOVERY_FORCED = 6,
}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
-/* Maximal number of channels to scan */
-#define MAX_NUM_SCAN_CHANNELS 0x24
+/**
+ * Maximal number of channels to scan
+ * it should be equal to:
+ * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+ */
+#define MAX_NUM_SCAN_CHANNELS 50
/**
* struct iwl_scan_cmd - scan request command
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
*
* IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
- * on A band.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100 ms - the typical
+ * beacon period. Finding channel activity in this mode is not guaranteed.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200 ms.
+ * Assuming a beacon period of 100 ms, finding channel activity is guaranteed.
*/
enum iwl_scan_offload_flags {
IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
- IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
+ IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
+ IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
};
/**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
IWL_SCAN_OFFLOAD_ABORTED = 2,
};
+enum iwl_scan_ebs_status {
+ IWL_SCAN_EBS_SUCCESS,
+ IWL_SCAN_EBS_FAILED,
+ IWL_SCAN_EBS_CHAN_NOT_FOUND,
+};
+
/**
* iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: enum iwl_scan_offload_compleate_status
+ * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
*/
struct iwl_scan_offload_complete {
u8 last_schedule_line;
u8 last_schedule_iteration;
u8 status;
- u8 reserved;
+ u8 ebs_status;
} __packed;
/**
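Since MAX_NUM_SCAN_CHANNELS is now a hand-maintained constant rather than being derived, a compile-time guard is a cheap way to keep it in sync with the channel tables. A sketch, assuming the table-size macros (currently private to iwl-nvm-parse.c) were visible from a shared header:

	#include <linux/bug.h>

	/* sketch: fail the build if the scan array can no longer hold
	 * every channel of the largest NVM channel table
	 */
	static inline void iwl_check_scan_channel_bound(void)
	{
		BUILD_BUG_ON(MAX_NUM_SCAN_CHANNELS < IWL_NUM_CHANNELS);
		BUILD_BUG_ON(MAX_NUM_SCAN_CHANNELS <
			     IWL_NUM_CHANNELS_FAMILY_8000);
	}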
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index d63647867262..39cebee8016f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
- * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- * sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to
* @addr[ETH_ALEN]: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
- * @key: look at %iwl_mvm_keyinfo
* @station_flags: look at %iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwl_mvm_add_sta_cmd_v5 {
- u8 add_modify;
- u8 unicast_tx_key_id;
- u8 multicast_tx_key_id;
- u8 reserved1;
- __le32 mac_id_n_color;
- u8 addr[ETH_ALEN];
- __le16 reserved2;
- u8 sta_id;
- u8 modify_mask;
- __le16 reserved3;
- struct iwl_mvm_keyinfo key;
- __le32 station_flags;
- __le32 station_flags_msk;
- __le16 tid_disable_tx;
- __le16 reserved4;
- u8 add_immediate_ba_tid;
- u8 remove_immediate_ba_tid;
- __le16 add_immediate_ba_ssn;
- __le16 sleep_tx_count;
- __le16 sleep_state_flags;
- __le16 assoc_id;
- __le16 beamform_flags;
- __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
- * VER_7 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- * It only differs from VER_6 by the "awake_acs" field that is
- * reserved and ignored in VER_6.
- */
-struct iwl_mvm_add_sta_cmd_v7 {
+struct iwl_mvm_add_sta_cmd {
u8 add_modify;
u8 awake_acs;
__le16 tid_disable_tx;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 8e122f3a7a74..6cc5f52b807f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
u8 pa_integ_res_b[3];
u8 pa_integ_res_c[3];
__le16 measurement_req_id;
- __le16 reserved;
+ u8 reduced_tpc;
+ u8 reserved;
__le32 tfd_info;
__le16 seq_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 6e75b52588de..309a9b9a94fe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -71,6 +71,7 @@
#include "fw-api-power.h"
#include "fw-api-d3.h"
#include "fw-api-coex.h"
+#include "fw-api-scan.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
@@ -604,52 +605,7 @@ enum {
TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
-
-/**
- * struct iwl_time_event_cmd_api_v1 - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
- * with version 2. determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
- * @id: this field has two meanings, depending on the action:
- * If the action is ADD, then it means the type of event to add.
- * For all other actions it is the unique event ID assigned when the
- * event was added by the FW.
- * @apply_time: When to start the Time Event (in GP2)
- * @max_delay: maximum delay to event's start (apply time), in TU
- * @depends_on: the unique ID of the event we depend on (if any)
- * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
- * @duration: duration of event in TU
- * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
- * and TE_V1_EVENT_SOCIOPATHIC
- * @is_present: 0 or 1, are we present or absent during the Time Event
- * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
- */
-struct iwl_time_event_cmd_v1 {
- /* COMMON_INDEX_HDR_API_S_VER_1 */
- __le32 id_and_color;
- __le32 action;
- __le32 id;
- /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
- __le32 apply_time;
- __le32 max_delay;
- __le32 dep_policy;
- __le32 depends_on;
- __le32 is_present;
- __le32 max_frags;
- __le32 interval;
- __le32 interval_reciprocal;
- __le32 duration;
- __le32 repeat;
- __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
-
-
-/* Time event - defines for command API v2 */
+/* Time event - defines for command API */
/*
* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
#define TE_V2_PLACEMENT_POS 12
#define TE_V2_ABSENCE_POS 15
-/* Time event policy values (for time event cmd api v2)
+/* Time event policy values
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
};
/**
- * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * struct iwl_time_event_cmd - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
* with version 1. determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
* TE_EVENT_SOCIOPATHIC
* using TE_ABSENCE and using TE_NOTIF_*
*/
-struct iwl_time_event_cmd_v2 {
+struct iwl_time_event_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 7ce20062f32d..883e702152d5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -99,7 +99,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
};
IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
@@ -137,6 +137,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
mvm->umac_error_event_table =
le32_to_cpu(palive2->error_info_addr);
+ mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
+ mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
alive_data->valid = le16_to_cpu(palive2->status) ==
IWL_ALIVE_STATUS_OK;
@@ -180,6 +182,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
int ret, i;
enum iwl_ucode_type old_type = mvm->cur_ucode;
static const u8 alive_cmd[] = { MVM_ALIVE };
+ struct iwl_sf_region st_fwrd_space;
fw = iwl_get_ucode_image(mvm, ucode_type);
if (WARN_ON(!fw))
@@ -215,6 +218,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
+ /*
+ * update the sdio allocation according to the pointer we get in the
+ * alive notification.
+ */
+ st_fwrd_space.addr = mvm->sf_space.addr;
+ st_fwrd_space.size = mvm->sf_space.size;
+ ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
+
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
@@ -256,7 +267,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
phy_cfg_cmd.phy_cfg);
- return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
@@ -288,14 +299,14 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}
- ret = iwl_send_bt_prio_tbl(mvm);
+ ret = iwl_send_bt_init_conf(mvm);
if (ret)
goto error;
/* Read the NVM only at driver load time, no need to do this twice */
if (read_nvm) {
/* Read nvm */
- ret = iwl_nvm_init(mvm);
+ ret = iwl_nvm_init(mvm, true);
if (ret) {
IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
goto error;
@@ -303,7 +314,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
/* In case we read the NVM from external file, load it to the NIC */
- if (iwlwifi_mod_params.nvm_file)
+ if (mvm->nvm_file_name)
iwl_mvm_load_nvm_to_nic(mvm);
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
@@ -424,10 +435,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
- ret = iwl_send_bt_prio_tbl(mvm);
- if (ret)
- goto error;
-
ret = iwl_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -468,12 +475,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- ret = iwl_power_legacy_set_cam_mode(mvm);
- if (ret)
- goto error;
- }
-
ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 9ccec10bba16..8b5302777632 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- /* Don't use cts to self as the fw doesn't support it currently. */
if (vif->bss_conf.use_cts_prot) {
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- cmd->protection_flags |=
- cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
}
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
vif->bss_conf.use_cts_prot,
@@ -688,7 +685,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
struct iwl_mac_ctx_cmd *cmd)
{
- int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
sizeof(*cmd), cmd);
if (ret)
IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
@@ -696,19 +693,39 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
return ret;
}
-/*
- * Fill the specific data for mac context of type station or p2p client
- */
-static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_mac_data_sta *ctxt_sta,
- bool force_assoc_off)
+static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off)
{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mac_data_sta *ctxt_sta;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ if (vif->p2p) {
+ struct ieee80211_p2p_noa_attr *noa =
+ &vif->bss_conf.p2p_noa_attr;
+
+ cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+ ctxt_sta = &cmd.p2p_sta.sta;
+ } else {
+ ctxt_sta = &cmd.sta;
+ }
+
/* We need the dtim_period to set the MAC as associated */
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
u32 dtim_offs;
+ /* Allow beacons to pass through as long as we are not
+ * associated, or we do not have dtim period information.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+
/*
* The DTIM count counts down, so when it is N that means N
* more beacon intervals happen until the DTIM TBTT. Therefore
@@ -755,51 +772,6 @@ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
-}
-
-static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 action)
-{
- struct iwl_mac_ctx_cmd cmd = {};
-
- WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
-
- /* Fill the common data for all mac context types */
- iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
-
- /* Allow beacons to pass through as long as we are not associated,or we
- * do not have dtim period information */
- if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
- cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
- else
- cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
-
- /* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
- action == FW_CTXT_ACTION_ADD);
-
- return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
-}
-
-static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 action)
-{
- struct iwl_mac_ctx_cmd cmd = {};
- struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
-
- WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
-
- /* Fill the common data for all mac context types */
- iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
-
- /* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
- action == FW_CTXT_ACTION_ADD);
-
- cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
- IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
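
The comment in the hunk above describes how the next DTIM TSF is derived: the DTIM count counts down, so a count of N means N more beacon intervals until the DTIM TBTT. A runnable sketch of that arithmetic, with illustrative values and assuming 1 TU = 1024 usec:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sync_tsf = 1000000;    /* TSF of the last synced beacon, usec */
        uint32_t dtim_count = 2;        /* beacons remaining until the DTIM */
        uint32_t beacon_int = 100;      /* beacon interval, TU */
        uint64_t dtim_offs;

        dtim_offs = (uint64_t)dtim_count * beacon_int;
        dtim_offs *= 1024;              /* TU -> usec */

        printf("next DTIM TSF: %llu usec\n",
               (unsigned long long)(sync_tsf + dtim_offs));
        return 0;
}
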
@@ -1137,16 +1109,12 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
}
static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u32 action)
+ u32 action, bool force_assoc_off)
{
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (!vif->p2p)
- return iwl_mvm_mac_ctxt_cmd_station(mvm, vif,
- action);
- else
- return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
- action);
+ return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
+ force_assoc_off);
break;
case NL80211_IFTYPE_AP:
if (!vif->p2p)
@@ -1176,7 +1144,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
- ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD);
+ ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+ true);
if (ret)
return ret;
@@ -1187,7 +1156,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return 0;
}
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1195,7 +1165,8 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
- return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY);
+ return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+ force_assoc_off);
}
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1214,7 +1185,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->color));
cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
- ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
sizeof(cmd), &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
@@ -1240,11 +1211,23 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
u32 rate __maybe_unused =
le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le64_to_cpu(beacon->tsf),
rate);
+
+ if (unlikely(mvm->csa_vif && mvm->csa_vif->csa_active)) {
+ if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
+ } else {
+ ieee80211_csa_finish(mvm->csa_vif);
+ mvm->csa_vif = NULL;
+ }
+ }
+
return 0;
}
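
The new csa_vif handling above drives a channel switch from beacon-TX notifications: while the countdown is running, each transmitted beacon triggers a refreshed template; once mac80211 reports the countdown complete, the switch is finished. A stand-alone model of that state machine (the names and countdown mechanics are simplified stand-ins for the mac80211 calls):

#include <stdbool.h>
#include <stdio.h>

struct vif {
        bool csa_active;
        int csa_count;          /* beacons left before the switch */
};

static bool csa_is_complete(const struct vif *v)
{
        return v->csa_count == 0;
}

static void beacon_notif(struct vif *v)
{
        if (!v->csa_active)
                return;
        if (!csa_is_complete(v)) {
                v->csa_count--;         /* iwl_mvm_mac_ctxt_beacon_changed() */
                printf("sent refreshed beacon template, count=%d\n",
                       v->csa_count);
        } else {
                v->csa_active = false;  /* ieee80211_csa_finish() */
                printf("channel switch finished\n");
        }
}

int main(void)
{
        struct vif v = { .csa_active = true, .csa_count = 2 };

        for (int i = 0; i < 4; i++)
                beacon_notif(&v);
        return 0;
}
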
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 8735ef1f44ae..7215f5980186 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -295,7 +295,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
+ IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
+ !iwlwifi_mod_params.uapsd_disable) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -309,11 +311,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
-
- /* IBSS has bugs in older versions */
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -322,6 +321,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -365,14 +367,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- /* we create the 802.11 header and zero length SSID IE. */
- hw->wiphy->max_sched_scan_ie_len =
- SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- }
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +385,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#ifdef CONFIG_PM_SLEEP
- if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ if (iwl_mvm_is_d0i3_supported(mvm) &&
+ device_can_wakeup(mvm->trans->dev)) {
+ mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+ hw->wiphy->wowlan = &mvm->wowlan;
+ } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
@@ -540,13 +543,22 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
return -EACCES;
/* return from D0i3 before starting a new Tx aggregation */
- if (action == IEEE80211_AMPDU_TX_START) {
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
tx_agg_ref = true;
/*
- * wait synchronously until D0i3 exit to get the correct
- * sequence number for the tid
+ * for tx start, wait synchronously until D0i3 exit to
+ * get the correct sequence number for the tid.
+ * additionally, some other ampdu actions use direct
+ * target access, which is not handled automatically
+ * by the trans layer (unlike commands), so wait for
+ * d0i3 exit in these cases as well.
*/
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
@@ -554,6 +566,9 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
return -EIO;
}
+ break;
+ default:
+ break;
}
mutex_lock(&mvm->mutex);
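
The hunk above widens the D0i3 handling from TX_START alone to all TX aggregation actions, since some of them use direct target access that the transport layer does not wake the device for. A sketch of the take-reference / wait / drop-on-timeout pattern, with stand-in names and a simulated wait:

#include <stdbool.h>
#include <stdio.h>

static bool in_d0i3 = true;
static int refs;

static bool wait_for_d0i3_exit(void)
{
        in_d0i3 = false;        /* pretend the exit work completed in time */
        return !in_d0i3;
}

static int ampdu_action(void)
{
        refs++;                         /* iwl_mvm_ref(IWL_MVM_REF_TX_AGG) */
        if (!wait_for_d0i3_exit()) {
                refs--;                 /* iwl_mvm_unref() on timeout */
                return -5;              /* -EIO in the driver */
        }
        return 0;                       /* proceed holding the reference */
}

int main(void)
{
        printf("ret=%d refs=%d\n", ampdu_action(), refs);
        return 0;
}
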
@@ -758,7 +773,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.pwr_restriction = cpu_to_le16(tx_power),
};
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
sizeof(reduce_txpwr_cmd),
&reduce_txpwr_cmd);
}
@@ -817,18 +832,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
if (ret)
goto out_release;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
if (ret)
goto out_remove_mac;
- if (!mvm->bf_allowed_vif && false &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+ if (!mvm->bf_allowed_vif &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
mvm->bf_allowed_vif = mvmvif;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -969,7 +983,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@@ -1223,10 +1237,14 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
+ /* bcast filtering isn't supported for P2P client */
+ if (vif->p2p)
+ return 0;
+
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
}
#else
@@ -1253,7 +1271,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
@@ -1333,10 +1351,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@@ -1347,16 +1365,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_CQM) {
- IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+ IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
- ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
- if (ret)
- IWL_ERR(mvm, "failed to update CQM thresholds\n");
+ if (mvmvif->bf_data.bf_enabled) {
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update CQM thresholds\n");
+ }
}
if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
iwl_mvm_configure_bcast_filter(mvm, vif);
}
}
@@ -1402,7 +1423,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_ibss_active = true;
/* power updated needs to be done before quotas */
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@@ -1410,7 +1431,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
- iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
@@ -1420,7 +1441,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
return 0;
out_quota_failed:
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@@ -1450,13 +1471,13 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
- iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1477,7 +1498,7 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
BSS_CHANGED_BANDWIDTH) &&
- iwl_mvm_mac_ctxt_changed(mvm, vif))
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false))
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
@@ -1495,6 +1516,9 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+ iwl_mvm_sched_scan_stop(mvm, true);
+
switch (vif->type) {
case NL80211_IFTYPE_STATION:
iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
@@ -1525,7 +1549,7 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
switch (mvm->scan_status) {
case IWL_MVM_SCAN_SCHED:
- ret = iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_mvm_sched_scan_stop(mvm, true);
if (ret) {
ret = -EBUSY;
goto out;
@@ -1697,6 +1721,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_add_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
+ /*
+ * EBS may be disabled due to previous failures reported by FW.
+ * Reset EBS status here assuming environment has been changed.
+ */
+ mvm->last_ebs_successful = true;
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
@@ -1708,14 +1737,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
- if (vif->bss_conf.dtim_period)
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
- CMD_SYNC));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1772,7 +1799,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
int ret;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
mutex_unlock(&mvm->mutex);
return ret;
}
@@ -1865,7 +1892,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
int ret;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_mvm_sched_scan_stop(mvm, false);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
@@ -2161,10 +2188,10 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
return;
mutex_lock(&mvm->mutex);
+ iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
- iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -2184,6 +2211,11 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
+ /* Unless it's a CSA flow we have nothing to do here */
+ if (vif->csa_active) {
+ mvmvif->ap_ibss_active = true;
+ break;
+ }
case NL80211_IFTYPE_ADHOC:
/*
* The AP binding flow is handled as part of the start_ap flow
@@ -2207,7 +2239,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* Power state must be updated before quotas,
* otherwise fw will complain.
*/
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@@ -2220,11 +2252,17 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out_remove_binding;
}
+ /* Handle binding during CSA */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_update_quotas(mvm, vif);
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false);
+ }
+
goto out_unlock;
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -2244,22 +2282,29 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
switch (vif->type) {
- case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
goto out_unlock;
case NL80211_IFTYPE_MONITOR:
mvmvif->monitor_active = false;
iwl_mvm_update_quotas(mvm, NULL);
break;
+ case NL80211_IFTYPE_AP:
+ /* This part is triggered only during CSA */
+ if (!vif->csa_active || !mvmvif->ap_ibss_active)
+ goto out_unlock;
+
+ mvmvif->ap_ibss_active = false;
+ iwl_mvm_update_quotas(mvm, NULL);
+ /*TODO: bt_coex notification here? */
default:
break;
}
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
+ iwl_mvm_power_update_mac(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -2323,9 +2368,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif,
- CMD_SYNC);
- return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+ return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
}
return -EOPNOTSUPP;
@@ -2346,6 +2390,53 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
+static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ if (WARN(mvm->csa_vif && mvm->csa_vif->csa_active,
+ "Another CSA is already in progress"))
+ goto out_unlock;
+
+ IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
+ chandef->center_freq1);
+ mvm->csa_vif = vif;
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 queues, bool drop)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+
+ if (!vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ mutex_lock(&mvm->mutex);
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+
+ if (WARN_ON_ONCE(!mvmsta))
+ goto done;
+
+ if (drop) {
+ if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ iwl_trans_wait_tx_queue_empty(mvm->trans,
+ mvmsta->tfd_queue_msk);
+ }
+done:
+ mutex_unlock(&mvm->mutex);
+}
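
iwl_mvm_mac_flush() above implements the two mac80211 flush semantics: drop discards whatever is queued, while a plain flush waits for the queues to drain. A trivial stand-alone model of that split:

#include <stdbool.h>
#include <stdio.h>

static void flush(bool drop)
{
        if (drop)
                printf("discard queued frames\n");     /* iwl_mvm_flush_tx_path() */
        else
                printf("wait for queues to drain\n");  /* iwl_trans_wait_tx_queue_empty() */
}

int main(void)
{
        flush(true);
        flush(false);
        return 0;
}
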
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -2369,6 +2460,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .flush = iwl_mvm_mac_flush,
.sched_scan_start = iwl_mvm_mac_sched_scan_start,
.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
.set_key = iwl_mvm_mac_set_key,
@@ -2388,6 +2480,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
+ .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
+
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index f1ec0986c3c9..fcc6c29482d0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
- MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
u32 tx_data_timeout;
bool skip_over_dtim;
u8 skip_dtim_periods;
- bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_USER,
IWL_MVM_REF_TX,
IWL_MVM_REF_TX_AGG,
+ IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_COUNT,
};
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
* @uploaded: indicates the MAC context has been added to the device
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
+ * @pm_enabled: indicates if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
+ bool pm_enabled;
bool monitor_active;
bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
int last_frame_idx;
};
+enum {
+ D0I3_DEFER_WAKEUP,
+ D0I3_PENDING_WAKEUP,
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -484,6 +490,7 @@ struct iwl_mvm {
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
+ struct iwl_sf_region sf_space;
u32 ampdu_ref;
@@ -495,6 +502,7 @@ struct iwl_mvm {
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+ const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@@ -535,6 +543,8 @@ struct iwl_mvm {
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
+ bool last_ebs_successful;
+
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
@@ -578,8 +588,12 @@ struct iwl_mvm {
void *fw_error_dump;
void *fw_error_sram;
u32 fw_error_sram_len;
+ u32 *fw_error_rxf;
+ u32 fw_error_rxf_len;
+#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
+#endif
struct ieee80211_vif *p2p_device_vif;
@@ -601,6 +615,9 @@ struct iwl_mvm {
bool d0i3_offloading;
struct work_struct d0i3_exit_work;
struct sk_buff_head d0i3_tx;
+ /* protect d0i3_suspend_flags */
+ struct mutex d0i3_suspend_mutex;
+ unsigned long d0i3_suspend_flags;
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +646,8 @@ struct iwl_mvm {
/* Indicate if device power save is allowed */
bool ps_disabled;
- /* Indicate if device power management is allowed */
- bool pm_disabled;
+
+ struct ieee80211_vif *csa_vif;
};
/* Extract MVM priv from op_mode and _hw */
@@ -705,6 +722,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -745,7 +763,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
/* NVM */
-int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_up(struct iwl_mvm *mvm);
@@ -796,7 +814,8 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
@@ -840,7 +859,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify);
int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@@ -874,10 +893,8 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
int rs_pretty_print_rate(char *buf, const u32 rate);
/* power management */
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
-int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
@@ -886,8 +903,18 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+#else
+static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+}
+#endif
/* D3 (WoWLAN, NetDetect) */
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
@@ -922,9 +949,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -936,9 +963,10 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@@ -969,17 +997,11 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 flags);
-int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable);
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool force,
- u32 flags);
-
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index cf2d09f53782..808f78f6fbf9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -74,6 +74,12 @@
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
+/* load nvm chunk response */
+enum {
+ READ_NVM_CHUNK_SUCCEED = 0,
+ READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
+};
+
/*
* prepare the NVM host command w/ the pointers to the nvm buffer
* and send it to fw
@@ -90,7 +96,7 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
struct iwl_host_cmd cmd = {
.id = NVM_ACCESS_CMD,
.len = { sizeof(struct iwl_nvm_access_cmd), length },
- .flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
+ .flags = CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, data },
/* data may come from vmalloc, so use _DUP */
.dataflags = { 0, IWL_HCMD_DFL_DUP },
@@ -112,7 +118,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = NVM_ACCESS_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, },
};
int ret, bytes_read, offset_read;
@@ -139,10 +145,26 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
offset_read = le16_to_cpu(nvm_resp->offset);
resp_data = nvm_resp->data;
if (ret) {
- IWL_ERR(mvm,
- "NVM access command failed with status %d (device: %s)\n",
- ret, mvm->cfg->name);
- ret = -EINVAL;
+ if ((offset != 0) &&
+ (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+ /*
+			 * meaning of NOT_VALID_ADDRESS:
+			 * the driver tried to read a chunk from an address
+			 * that is a multiple of 2K and got an error because
+			 * that address is empty.
+			 * meaning of (offset != 0): the driver has already
+			 * read valid data from another chunk, so this case
+			 * is not an error.
+ */
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
+ offset);
+ ret = 0;
+ } else {
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed with status %d (device: %s)\n",
+ ret, mvm->cfg->name);
+ ret = -EIO;
+ }
goto exit;
}
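
The error handling above distinguishes a genuine NVM failure from the normal end of a section whose size is a multiple of the 2K chunk: NOT_VALID_ADDRESS at a non-zero offset simply terminates the read loop. A runnable model of that loop, with illustrative sizes and status codes:

#include <stdio.h>

enum {
        CHUNK_OK = 0,
        CHUNK_NOT_VALID_ADDRESS = 1,    /* READ_NVM_CHUNK_NOT_VALID_ADDRESS */
};

/* pretend reads past the end of the section fail with NOT_VALID_ADDRESS */
static int read_chunk(unsigned int offset, unsigned int section_size)
{
        return offset >= section_size ? CHUNK_NOT_VALID_ADDRESS : CHUNK_OK;
}

int main(void)
{
        unsigned int section_size = 4096, chunk = 2048, offset = 0;
        int status;

        while ((status = read_chunk(offset, section_size)) == CHUNK_OK)
                offset += chunk;

        if (status == CHUNK_NOT_VALID_ADDRESS && offset != 0)
                printf("section ended cleanly at offset 0x%x\n", offset);
        else
                printf("fatal NVM read error\n");
        return 0;
}
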
@@ -211,9 +233,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
while (ret == length) {
ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
if (ret < 0) {
- IWL_ERR(mvm,
- "Cannot read NVM from section %d offset %d, length %d\n",
- section, offset, length);
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "Cannot read NVM from section %d offset %d, length %d\n",
+ section, offset, length);
return ret;
}
offset += ret;
@@ -238,13 +260,20 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
return NULL;
}
} else {
+ /* SW and REGULATORY sections are mandatory */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
!mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
IWL_ERR(mvm,
"Can't parse empty family 8000 NVM sections\n");
return NULL;
}
+ /* MAC_OVERRIDE or at least HW section must exist */
+ if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+ IWL_ERR(mvm,
+ "Can't parse mac_address, empty sections\n");
+ return NULL;
+ }
}
if (WARN_ON(!mvm->cfg))
@@ -311,16 +340,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
* get here after that we assume the NVM request can be satisfied
* synchronously.
*/
- ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file,
+ ret = request_firmware(&fw_entry, mvm->nvm_file_name,
mvm->trans->dev);
if (ret) {
IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
- iwlwifi_mod_params.nvm_file, ret);
+ mvm->nvm_file_name, ret);
return ret;
}
IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
- iwlwifi_mod_params.nvm_file, fw_entry->size);
+ mvm->nvm_file_name, fw_entry->size);
if (fw_entry->size < sizeof(*file_sec)) {
IWL_ERR(mvm, "NVM file too small\n");
@@ -427,53 +456,28 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
return ret;
}
-int iwl_nvm_init(struct iwl_mvm *mvm)
+int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
- int ret, i, section;
+ int ret, section;
u8 *nvm_buffer, *temp;
- int nvm_to_read[NVM_MAX_NUM_SECTIONS];
- int num_of_sections_to_read;
if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
return -EINVAL;
- /* load external NVM if configured */
- if (iwlwifi_mod_params.nvm_file) {
- /* move to External NVM flow */
- ret = iwl_mvm_read_external_nvm(mvm);
- if (ret)
- return ret;
- } else {
- /* list of NVM sections we are allowed/need to read */
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
- nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
- nvm_to_read[1] = NVM_SECTION_TYPE_SW;
- nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
- nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
- num_of_sections_to_read = 4;
- } else {
- nvm_to_read[0] = NVM_SECTION_TYPE_SW;
- nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
- nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
- nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
- nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
- num_of_sections_to_read = 5;
- }
-
+ /* load NVM values from nic */
+ if (read_nvm_from_nic) {
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
- /* TODO: find correct NVM max size for a section */
nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
GFP_KERNEL);
if (!nvm_buffer)
return -ENOMEM;
- for (i = 0; i < num_of_sections_to_read; i++) {
- section = nvm_to_read[i];
+ for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
if (ret < 0)
- break;
+ continue;
temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
if (!temp) {
ret = -ENOMEM;
@@ -502,15 +506,21 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_hw_blob.size = ret;
break;
}
- WARN(1, "section: %d", section);
}
#endif
}
kfree(nvm_buffer);
- if (ret < 0)
+ }
+
+ /* load external NVM if configured */
+ if (mvm->nvm_file_name) {
+ /* move to External NVM flow */
+ ret = iwl_mvm_read_external_nvm(mvm);
+ if (ret)
return ret;
}
+ /* parse the relevant nvm sections */
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data)
return -ENODATA;
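
The reworked iwl_nvm_init() flow above is: optionally read every section from the NIC (a section that fails to read is skipped rather than fatal), then overlay an external NVM file if one is configured, then parse. A compressed sketch of that ordering; the section count, helper, and file name below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define NVM_MAX_NUM_SECTIONS 16

/* pretend only some sections are present in the OTP */
static int read_section(int section)
{
        return (section % 3) ? -1 : 0;
}

static int nvm_init(bool read_from_nic, const char *nvm_file)
{
        if (read_from_nic)
                for (int s = 0; s < NVM_MAX_NUM_SECTIONS; s++)
                        if (read_section(s) < 0)
                                continue;       /* skipped, not fatal */

        if (nvm_file)
                printf("overlaying external NVM file: %s\n", nvm_file);

        printf("parsing NVM sections\n");
        return 0;
}

int main(void)
{
        return nvm_init(true, "nvm-data.bin");  /* file name hypothetical */
}
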
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 9545d7fdd4bf..cc2f7de396de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -79,8 +79,8 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
-#include "fw-error-dump.h"
#include "time-event.h"
+#include "iwl-fw-error-dump.h"
/*
* module name, copyright, version, etc.
@@ -220,7 +220,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
- RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
+ RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
iwl_mvm_rx_ant_coupling_notif, true),
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->sf_state = SF_UNINIT;
mutex_init(&mvm->mutex);
+ mutex_init(&mvm->d0i3_suspend_mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -465,13 +466,24 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
+ /* set the nvm_file_name according to priority */
+ if (iwlwifi_mod_params.nvm_file)
+ mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+ else
+ mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+
+ if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
+ "not allowing power-up and not having nvm_file\n"))
+ goto out_free;
/*
- * If the NVM exists in an external file,
- * there is no need to unnecessarily power up the NIC at driver load
+	 * Even if the NVM exists in an external file, the driver should
+	 * read the NVM again from the NIC, because there might be entries
+	 * that exist in the OTP and not in the file.
+	 * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
*/
- if (iwlwifi_mod_params.nvm_file) {
- err = iwl_nvm_init(mvm);
+ if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
+ err = iwl_nvm_init(mvm, false);
if (err)
goto out_free;
} else {
@@ -518,7 +530,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- if (!iwlwifi_mod_params.nvm_file)
+ if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
return NULL;
@@ -538,6 +550,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->scan_cmd);
vfree(mvm->fw_error_dump);
kfree(mvm->fw_error_sram);
+ kfree(mvm->fw_error_rxf);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
@@ -814,6 +827,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
u32 file_len;
+ u32 trans_len;
lockdep_assert_held(&mvm->mutex);
@@ -821,8 +835,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
return;
file_len = mvm->fw_error_sram_len +
+ mvm->fw_error_rxf_len +
sizeof(*dump_file) +
- sizeof(*dump_data);
+ sizeof(*dump_data) * 2;
+
+ trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
+ if (trans_len)
+ file_len += trans_len;
dump_file = vmalloc(file_len);
if (!dump_file)
@@ -833,7 +852,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_file->file_len = cpu_to_le32(file_len);
dump_data = (void *)dump_file->data;
- dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
+ memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
+
+ dump_data = iwl_mvm_fw_error_next_data(dump_data);
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
/*
@@ -842,6 +866,23 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
* mvm->fw_error_sram right now.
*/
memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+
+ kfree(mvm->fw_error_rxf);
+ mvm->fw_error_rxf = NULL;
+ mvm->fw_error_rxf_len = 0;
+
+ kfree(mvm->fw_error_sram);
+ mvm->fw_error_sram = NULL;
+ mvm->fw_error_sram_len = 0;
+
+ if (trans_len) {
+ void *buf = iwl_mvm_fw_error_next_data(dump_data);
+ u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
+ trans_len);
+ dump_data = (void *)((u8 *)buf + real_trans_len);
+ dump_file->file_len =
+ cpu_to_le32(file_len - trans_len + real_trans_len);
+ }
}
#endif
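
The dump file built above is a header followed by back-to-back {type, len, data} records, RXF first, then SRAM, then optional transport data, with iwl_mvm_fw_error_next_data() stepping from one record to the next. A stand-alone model of that layout and the record-stepping arithmetic (type values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_data {
        uint32_t type;
        uint32_t len;
        uint8_t data[];
};

static struct dump_data *next_data(struct dump_data *d)
{
        return (struct dump_data *)((uint8_t *)d + sizeof(*d) + d->len);
}

int main(void)
{
        static uint32_t buf[64];                /* word-aligned scratch "file" */
        struct dump_data *d = (struct dump_data *)buf;

        d->type = 1;                            /* e.g. an RXF record */
        d->len = 16;
        memset(d->data, 0xaa, d->len);

        d = next_data(d);                       /* iwl_mvm_fw_error_next_data() */
        d->type = 2;                            /* e.g. an SRAM record */
        d->len = 32;
        memset(d->data, 0xbb, d->len);

        printf("second record starts at byte %zu\n",
               (size_t)((uint8_t *)d - (uint8_t *)buf));
        return 0;
}
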
@@ -853,6 +894,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_fw_error_sram_dump(mvm);
+ iwl_mvm_fw_error_rxf_dump(mvm);
#endif
iwl_mvm_nic_restart(mvm);
@@ -1126,9 +1168,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
struct iwl_host_cmd get_status_cmd = {
.id = WOWLAN_GET_STATUSES,
- .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+ .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
};
- struct iwl_wowlan_status_v6 *status;
+ struct iwl_wowlan_status *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
@@ -1158,18 +1200,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
CMD_WAKE_UP_TRANS;
int ret;
IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+ IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+ __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
if (ret)
goto out;
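
The defer-wakeup handshake above, guarded by d0i3_suspend_mutex: if the platform is suspending (D0I3_DEFER_WAKEUP set), an exit request only records D0I3_PENDING_WAKEUP and returns, and the resume path is expected to replay the exit later. A minimal single-threaded model of those two flags (locking elided):

#include <stdbool.h>
#include <stdio.h>

static bool defer_wakeup, pending_wakeup;

static int exit_d0i3(void)
{
        /* in the driver this test runs under d0i3_suspend_mutex */
        if (defer_wakeup) {
                pending_wakeup = true;  /* __set_bit(D0I3_PENDING_WAKEUP, ...) */
                return 0;               /* deferred until resume */
        }
        printf("sending D0I3_END_CMD now\n");
        return 0;
}

int main(void)
{
        defer_wakeup = true;
        exit_d0i3();                    /* only records the pending wakeup */

        defer_wakeup = false;
        if (pending_wakeup)             /* what the resume path would check */
                exit_d0i3();
        return 0;
}
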
@@ -1183,6 +1234,25 @@ out:
return ret;
}
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+ return _iwl_mvm_exit_d0i3(mvm);
+}
+
+static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -1196,4 +1266,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
+ .napi_add = iwl_mvm_napi_add,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 237efe0ac1c4..539f3a942d43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,6 +156,18 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
idle_cnt = chains_static;
active_cnt = chains_dynamic;
+	/* In scenarios where we only ever use single-stream rates,
+ * i.e. legacy 11b/g/a associations, single-stream APs or even
+ * static SMPS, enable both chains to get diversity, improving
+ * the case where we're far enough from the AP that attenuation
+ * between the two antennas is sufficiently different to impact
+ * performance.
+ */
+ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+ idle_cnt = 2;
+ active_cnt = 2;
+ }
+
cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
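
The diversity tweak above: when rate control would only ever use single-stream rates and iwl_mvm_rx_diversity_allowed() agrees, both RX chains are enabled anyway so the second antenna still contributes. A runnable sketch of the chain-count adjustment:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int chains_static = 1, chains_dynamic = 1;
        int idle_cnt = chains_static, active_cnt = chains_dynamic;
        bool rx_diversity_allowed = true;  /* iwl_mvm_rx_diversity_allowed() */

        if (active_cnt == 1 && rx_diversity_allowed) {
                idle_cnt = 2;              /* use both chains for diversity */
                active_cnt = 2;
        }
        printf("idle=%d active=%d\n", idle_cnt, active_cnt);
        return 0;
}
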
@@ -187,7 +199,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
chains_static, chains_dynamic);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
sizeof(struct iwl_phy_context_cmd),
&cmd);
if (ret)
@@ -202,18 +214,15 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
- int ret;
-
WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
ctxt->ref);
lockdep_assert_held(&mvm->mutex);
ctxt->channel = chandef->chan;
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
- chains_static, chains_dynamic,
- FW_CTXT_ACTION_ADD, 0);
- return ret;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD, 0);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 6b636eab3339..c182a8baf685 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -123,28 +123,6 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
}
-int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- .ba_enable_beacon_abort = cpu_to_le32(enable),
- };
-
- if (!mvmvif->bf_data.bf_enabled)
- return 0;
-
- if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
- cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-
- mvmvif->bf_data.ba_enabled = enable;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
-}
-
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd *cmd)
{
@@ -268,6 +246,57 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ unsigned long *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_AP)
+ __set_bit(mvmvif->phy_ctxt->id, data);
+}
+
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned long phy_ctxt_counter = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_binding_iterator,
+ &phy_ctxt_counter);
+
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ return false;
+
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+ return false;
+ /*
+	 * Avoid using uAPSD if the P2P client is associated to a GO that uses
+ * opportunistic power save. This is due to current FW limitation.
+ */
+ if (vif->p2p &&
+ (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return false;
+
+ /*
+	 * Avoid using uAPSD if the client is in DCM, due to a
+	 * low-latency issue in Miracast
+ */
+ if (hweight8(phy_ctxt_counter) >= 2)
+ return false;
+
+ return true;
+}
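
The DCM test in iwl_mvm_power_allow_uapsd() above works by having every station/AP interface set the bit of its PHY context in a shared bitmap; two or more distinct bits means two channels are in use, which disqualifies uAPSD. A stand-alone model of the bitmap and population count:

#include <stdio.h>

static int hweight8(unsigned int w)
{
        int n = 0;

        for (; w; w >>= 1)
                n += w & 1;
        return n;
}

int main(void)
{
        unsigned long phy_ctxt_counter = 0;

        phy_ctxt_counter |= 1UL << 0;  /* BSS vif on PHY context 0 */
        phy_ctxt_counter |= 1UL << 1;  /* P2P client on PHY context 1 */

        if (hweight8(phy_ctxt_counter) >= 2)
                printf("DCM: uAPSD not allowed\n");
        return 0;
}
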
+
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
@@ -280,7 +309,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
- bool allow_uapsd = true;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@@ -303,13 +331,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
- mvm->pm_disabled)
+ !mvmvif->pm_enabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +374,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
}
- if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
- ETH_ALEN))
- allow_uapsd = false;
-
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
- allow_uapsd = false;
- /*
- * Avoid using uAPSD if P2P client is associated to GO that uses
- * opportunistic power save. This is due to current FW limitation.
- */
- if (vif->p2p &&
- vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
- IEEE80211_P2P_OPPPS_ENABLE_BIT)
- allow_uapsd = false;
-
- if (allow_uapsd)
+ if (iwl_mvm_power_allow_uapsd(mvm, vif))
iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,20 +428,13 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{
struct iwl_mac_power_cmd cmd = {};
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
-
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
- return 0;
-
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
- return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
sizeof(cmd), &cmd);
}
@@ -444,12 +444,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
- return 0;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
- return 0;
-
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
mvm->ps_disabled = true;
@@ -466,7 +460,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
"Sending device power command with flags = 0x%X\n",
cmd.flags);
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
&cmd);
}
@@ -508,86 +502,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-struct iwl_power_constraint {
+struct iwl_power_vifs {
struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
struct ieee80211_vif *p2p_vif;
- u16 bss_phyctx_id;
- u16 p2p_phyctx_id;
- bool pm_disabled;
- bool ps_disabled;
- struct iwl_mvm *mvm;
+ struct ieee80211_vif *ap_vif;
+ struct ieee80211_vif *monitor_vif;
+ bool p2p_active;
+ bool bss_active;
+ bool ap_active;
+ bool monitor_active;
};
static void iwl_mvm_power_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_power_constraint *power_iterator = _data;
- struct iwl_mvm *mvm = power_iterator->mvm;
+ struct iwl_power_vifs *power_iterator = _data;
+ mvmvif->pm_enabled = false;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP:
- /* no BSS power mgmt if we have an active AP */
- if (mvmvif->ap_ibss_active)
- power_iterator->pm_disabled = true;
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->ap_vif);
+ power_iterator->ap_vif = vif;
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->ap_active = true;
break;
case NL80211_IFTYPE_MONITOR:
- /* no BSS power mgmt and no device power save */
- power_iterator->pm_disabled = true;
- power_iterator->ps_disabled = true;
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->monitor_vif);
+ power_iterator->monitor_vif = vif;
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->monitor_active = true;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- if (mvmvif->phy_ctxt)
- power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
-
- /* we should have only one P2P vif */
+ /* only a single MAC of the same type */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
-
- IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
- power_iterator->p2p_phyctx_id,
- power_iterator->bss_phyctx_id);
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
- /* no BSS power mgmt if we have a P2P client*/
- power_iterator->pm_disabled = true;
- } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
- power_iterator->bss_phyctx_id < MAX_PHYS &&
- power_iterator->p2p_phyctx_id ==
- power_iterator->bss_phyctx_id) {
- power_iterator->pm_disabled = true;
- }
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->p2p_active = true;
break;
case NL80211_IFTYPE_STATION:
- if (mvmvif->phy_ctxt)
- power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
-
- /* we should have only one BSS vif */
+ /* only a single MAC of the same type */
WARN_ON(power_iterator->bss_vif);
power_iterator->bss_vif = vif;
+ if (mvmvif->phy_ctxt)
+ if (mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->bss_active = true;
if (mvmvif->bf_data.bf_enabled &&
!WARN_ON(power_iterator->bf_vif))
power_iterator->bf_vif = vif;
- IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
- power_iterator->p2p_phyctx_id,
- power_iterator->bss_phyctx_id);
- if (mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
- (power_iterator->p2p_phyctx_id < MAX_PHYS &&
- power_iterator->bss_phyctx_id < MAX_PHYS &&
- power_iterator->p2p_phyctx_id ==
- power_iterator->bss_phyctx_id))
- power_iterator->pm_disabled = true;
break;
default:
@@ -596,70 +573,73 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
}
static void
-iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
- struct iwl_power_constraint *constraint)
+iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+ struct iwl_power_vifs *vifs)
{
- lockdep_assert_held(&mvm->mutex);
+ struct iwl_mvm_vif *bss_mvmvif = NULL;
+ struct iwl_mvm_vif *p2p_mvmvif = NULL;
+ struct iwl_mvm_vif *ap_mvmvif = NULL;
+ bool client_same_channel = false;
+ bool ap_same_channel = false;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
- constraint->pm_disabled = true;
- constraint->ps_disabled = true;
- }
+ lockdep_assert_held(&mvm->mutex);
+	/* get vifs info and set pm_enabled to false */
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_iterator, constraint);
-}
-
-int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_power_constraint constraint = {
- .p2p_phyctx_id = MAX_PHYS,
- .bss_phyctx_id = MAX_PHYS,
- .mvm = mvm,
- };
- bool ba_enable;
- int ret;
+ iwl_mvm_power_iterator, vifs);
- lockdep_assert_held(&mvm->mutex);
+ if (vifs->bss_vif)
+ bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
- return 0;
+ if (vifs->p2p_vif)
+ p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
- iwl_mvm_power_get_global_constraint(mvm, &constraint);
- mvm->ps_disabled = constraint.ps_disabled;
- mvm->pm_disabled = constraint.pm_disabled;
+ if (vifs->ap_vif)
+ ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
- /* don't update device power state unless we add / remove monitor */
- if (vif->type == NL80211_IFTYPE_MONITOR) {
- ret = iwl_mvm_power_update_device(mvm);
- if (ret)
- return ret;
+	/* enable PM on the BSS vif if it stands alone */
+ if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+ bss_mvmvif->pm_enabled = true;
+ return;
}
- if (constraint.bss_vif) {
- ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
- if (ret)
- return ret;
+ /* enable PM on p2p if p2p stands alone */
+ if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+ p2p_mvmvif->pm_enabled = true;
+ return;
}
- if (constraint.p2p_vif) {
- ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
- if (ret)
- return ret;
+ if (vifs->bss_active && vifs->p2p_active)
+ client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ p2p_mvmvif->phy_ctxt->id);
+ if (vifs->bss_active && vifs->ap_active)
+ ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ ap_mvmvif->phy_ctxt->id);
+
+ /* clients are not standalone: enable PM if DCM */
+ if (!(client_same_channel || ap_same_channel) &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+ if (vifs->bss_active)
+ bss_mvmvif->pm_enabled = true;
+ if (vifs->p2p_active &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
+ p2p_mvmvif->pm_enabled = true;
+ return;
}
- if (!constraint.bf_vif)
- return 0;
-
- vif = constraint.bf_vif;
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
- !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
-
- return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
+ /*
+ * There is only one channel in the system and there are only
+ * bss and p2p clients that share it
+ */
+ if (client_same_channel && !vifs->ap_active &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+ /* share same channel */
+ bss_mvmvif->pm_enabled = true;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+ p2p_mvmvif->pm_enabled = true;
+ }
}
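
A compact model of the policy iwl_mvm_power_set_pm() encodes, as a standalone sketch; the helper name and its flattened booleans are hypothetical, while the real code operates on mvmvif pointers and the IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM/SCM capability bits:

    #include <stdbool.h>

    /* Illustrative model only -- not driver code. Returns whether
     * PM may be enabled on the BSS client vif. */
    static bool bss_pm_allowed(bool bss, bool p2p, bool ap,
                               bool client_scm, bool ap_scm,
                               bool fw_dcm, bool fw_scm)
    {
            if (bss && !p2p && !ap)
                    return true;    /* BSS stands alone */
            if (bss && !client_scm && !ap_scm && fw_dcm)
                    return true;    /* clients on different channels */
            if (bss && client_scm && !ap && fw_scm)
                    return true;    /* BSS + P2P share one channel */
            return false;
    }

The P2P client follows the same three cases but additionally requires IWL_UCODE_TLV_FLAGS_P2P_PM in each of them.
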
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -671,19 +651,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- if (WARN_ON(!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
- return 0;
-
mutex_lock(&mvm->mutex);
memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
mutex_unlock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -790,7 +761,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (mvmvif != mvm->bf_allowed_vif ||
+ if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
@@ -818,6 +789,26 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
}
+static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ if (!mvmvif->bf_data.bf_enabled)
+ return 0;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ mvmvif->bf_data.ba_enabled = enable;
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
+}
+
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 flags)
@@ -826,8 +817,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
- vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -838,6 +828,55 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_power_vifs vifs = {};
+ bool ba_enable;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_power_set_pm(mvm, &vifs);
+
+ /* disable PS if CAM */
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ mvm->ps_disabled = true;
+ } else {
+ /* don't update device power state unless we add / remove monitor */
+ if (vifs.monitor_vif) {
+ if (vifs.monitor_active)
+ mvm->ps_disabled = true;
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (vifs.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (vifs.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (!vifs.bf_vif)
+ return 0;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif);
+
+ ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
+ !vifs.bf_vif->bss_conf.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
+
+ return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
+}
+
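
The ba_enable expression above is a negated disjunction; its affirmative form is easier to read, and the equivalence can be brute-forced over all inputs (standalone C sketch, local names shortened):

    #include <assert.h>
    #include <stdbool.h>

    static bool ba_orig(bool pm, bool ps_dis, bool ps, bool ll)
    {
            return !(!pm || ps_dis || !ps || ll);
    }

    static bool ba_pos(bool pm, bool ps_dis, bool ps, bool ll)
    {
            /* beacon abort only when PM is enabled, PS is usable and
             * requested, and the vif is not low-latency */
            return pm && !ps_dis && ps && !ll;
    }

    int main(void)
    {
            for (int i = 0; i < 16; i++)
                    assert(ba_orig(i & 1, i & 2, i & 4, i & 8) ==
                           ba_pos(i & 1, i & 2, i & 4, i & 8));
            return 0;
    }
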
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool enable, u32 flags)
@@ -861,9 +900,10 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
if (WARN_ON(!dtimper_msec))
return 0;
- cmd.flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
cmd.skip_dtim_periods = 300 / dtimper_msec;
+ if (cmd.skip_dtim_periods)
+ cmd.flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -894,33 +934,3 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
return ret;
}
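
The skip_dtim reordering a few lines up also changes behaviour when the integer division truncates to zero; worked numbers, assuming dtimper_msec is the DTIM interval in milliseconds:

    dtimper_msec = 102  (DTIM every beacon, 102.4 ms beacon interval)
        skip_dtim_periods = 300 / 102 = 2   -> SKIP_OVER_DTIM set
    dtimper_msec = 307  (DTIM period 3)
        skip_dtim_periods = 300 / 307 = 0   -> flag left clear; the old
        order set POWER_FLAGS_SKIP_OVER_DTIM_MSK even with a zero count
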
-
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool force,
- u32 flags)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif != mvm->bf_allowed_vif)
- return 0;
-
- if (!mvmvif->bf_data.bf_enabled) {
- /* disable beacon filtering explicitly if force is true */
- if (force)
- return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
- return 0;
- }
-
- return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
-}
-
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
-{
- struct iwl_powertable_cmd cmd = {
- .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
- };
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
-}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 35e86e06dffd..ba68d7b84505 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -285,7 +285,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index e1c838899363..306a6caa4868 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -211,7 +211,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
.next_columns = {
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
@@ -223,8 +223,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_B,
.next_columns = {
RS_COLUMN_LEGACY_ANT_A,
- RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
@@ -238,10 +238,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -254,10 +254,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -271,10 +271,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_MIMO2,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -289,10 +289,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_MIMO2,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -304,12 +304,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_AB,
.next_columns = {
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -321,12 +321,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
.sgi = true,
.next_columns = {
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&tbl->win[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+ rs_rate_scale_clear_window(&tbl->tpc_win[i]);
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
+static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ u8 reduced_txp)
{
struct iwl_rate_scale_data *window = NULL;
+ int ret;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
+ if (tbl->column != RS_COLUMN_INVALID) {
+ lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+ lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+ }
+
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
+ ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ window);
+ if (ret)
+ return ret;
+
+ if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ return -EINVAL;
+
+ window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
window);
}
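
Each (attempts, successes) sample now lands in three places; a sketch of the bookkeeping (annotation, not driver code):

    /*
     * lq_sta->tx_stats[column][rate]  - lifetime totals for debugfs
     * tbl->win[rate]                  - rate-scaling history window
     * tbl->tpc_win[reduced_txp]       - per-txpower-reduction window,
     *                                   index 0..TPC_MAX_REDUCTION
     *
     * The tpc_win update is what later lets rs_tpc_perform() compare
     * measured throughput at adjacent power-reduction steps.
     */
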
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
u32 ucode_rate;
struct rs_rate rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- rs_collect_tx_data(curr_tbl, rate.index,
+ rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
info->status.ampdu_len,
- info->status.ampdu_ack_len);
+ info->status.ampdu_ack_len,
+ reduced_txp);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
else
continue;
- rs_collect_tx_data(tmp_tbl, rate.index, 1,
- i < retries ? 0 : legacy_success);
+ rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
}
/* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
lq_sta->last_rate_n_flags = ucode_rate;
+ IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
@@ -1311,105 +1335,50 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
}
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput. When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
static s32 rs_get_best_rate(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl, /* "search" */
- u16 rate_mask, s8 index)
+ unsigned long rate_mask, s8 index)
{
- /* "active" values */
struct iwl_scale_tbl_info *active_tbl =
&(lq_sta->lq_info[lq_sta->active_tbl]);
- s32 active_sr = active_tbl->win[index].success_ratio;
- s32 active_tpt = active_tbl->expected_tpt[index];
- /* expected "search" throughput */
+ s32 success_ratio = active_tbl->win[index].success_ratio;
+ u16 expected_current_tpt = active_tbl->expected_tpt[index];
const u16 *tpt_tbl = tbl->expected_tpt;
-
- s32 new_rate, high, low, start_hi;
u16 high_low;
- s8 rate = index;
-
- new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
- while (1) {
- high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
- tbl->rate.type);
-
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
+ u32 target_tpt;
+ int rate_idx;
- /*
- * Lower the "search" bit rate, to give new "search" mode
- * approximately the same throughput as "active" if:
- *
- * 1) "Active" mode has been working modestly well (but not
- * great), and expected "search" throughput (under perfect
- * conditions) at candidate rate is above the actual
- * measured "active" throughput (but less than expected
- * "active" throughput under perfect conditions).
- * OR
- * 2) "Active" mode has been working perfectly or very well
- * and expected "search" throughput (under perfect
- * conditions) at candidate rate is above expected
- * "active" throughput (under perfect conditions).
- */
- if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > RS_SR_FORCE_DECREASE) &&
- (active_sr <= IWL_RATE_HIGH_TH) &&
- (tpt_tbl[rate] <= active_tpt))) ||
- ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
- (tpt_tbl[rate] > active_tpt))) {
- /* (2nd or later pass)
- * If we've already tried to raise the rate, and are
- * now trying to lower it, use the higher rate. */
- if (start_hi != IWL_RATE_INVALID) {
- new_rate = start_hi;
- break;
- }
-
- new_rate = rate;
+ if (success_ratio > RS_SR_NO_DECREASE) {
+ target_tpt = 100 * expected_current_tpt;
+ IWL_DEBUG_RATE(mvm,
+ "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
+ success_ratio, target_tpt);
+ } else {
+ target_tpt = lq_sta->last_tpt;
+ IWL_DEBUG_RATE(mvm,
+ "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
+ success_ratio, target_tpt);
+ }
- /* Loop again with lower rate */
- if (low != IWL_RATE_INVALID)
- rate = low;
+ rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
- /* Lower rate not available, use the original */
- else
- break;
-
- /* Else try to raise the "search" rate to match "active" */
- } else {
- /* (2nd or later pass)
- * If we've already tried to lower the rate, and are
- * now trying to raise it, use the lower rate. */
- if (new_rate != IWL_RATE_INVALID)
- break;
+ while (rate_idx != IWL_RATE_INVALID) {
+ if (target_tpt < (100 * tpt_tbl[rate_idx]))
+ break;
- /* Loop again with higher rate */
- else if (high != IWL_RATE_INVALID) {
- start_hi = high;
- rate = high;
+ high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
+ tbl->rate.type);
- /* Higher rate not available, use the original */
- } else {
- new_rate = rate;
- break;
- }
- }
+ rate_idx = (high_low >> 8) & 0xff;
}
- return new_rate;
+ IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
+ rate_idx, target_tpt,
+ rate_idx != IWL_RATE_INVALID ?
+ 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
+
+ return rate_idx;
}
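
A worked pass through the rewritten search, with invented numbers (success ratios are on the scale where 12800 == 100%, and tpt_tbl holds expected throughputs):

    /*
     * success_ratio = 11000 (> RS_SR_NO_DECREASE = 10880):
     *         target_tpt = 100 * expected_current_tpt
     * success_ratio = 6000:
     *         target_tpt = lq_sta->last_tpt   (measured, already x100)
     *
     * rate_mask = 0b0111000, tpt_tbl[3..5] = { 40, 70, 90 },
     * target_tpt = 6500:
     *         idx 3: 100 * 40 = 4000 <= 6500 -> step up via
     *                rs_get_adjacent_rate()
     *         idx 4: 100 * 70 = 7000 >  6500 -> stop; best index is 4
     */
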
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
@@ -1584,7 +1553,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
tpt = lq_sta->last_tpt / 100;
expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
- tbl->rate.bw);
+ rs_bw_from_sta_bw(sta));
if (WARN_ON_ONCE(!expected_tpt_tbl))
continue;
@@ -1625,7 +1594,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u16 rate_mask = 0;
+ unsigned long rate_mask = 0;
u32 rate_idx = 0;
memcpy(search_tbl, tbl, sz);
@@ -1667,7 +1636,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
!(BIT(rate_idx) & rate_mask)) {
IWL_DEBUG_RATE(mvm,
"can not switch with index %d"
- " rate mask %x\n",
+ " rate mask %lx\n",
rate_idx, rate_mask);
goto err;
@@ -1769,6 +1738,203 @@ out:
return action;
}
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+ int *weaker, int *stronger)
+{
+ *weaker = index + TPC_TX_POWER_STEP;
+ if (*weaker > TPC_MAX_REDUCTION)
+ *weaker = TPC_INVALID;
+
+ *stronger = index - TPC_TX_POWER_STEP;
+ if (*stronger < 0)
+ *stronger = TPC_INVALID;
+}
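
With TPC_TX_POWER_STEP = 3 and TPC_MAX_REDUCTION = 15 (defined in the rs.h hunk below), the reachable indices and edge cases are:

    /*
     *   index      *weaker        *stronger
     *     0           3          TPC_INVALID   (already full power)
     *     3           6             0
     *    12          15             9
     *    15      TPC_INVALID       12          (already weakest)
     */
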
+
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct rs_rate *rate, enum ieee80211_band band)
+{
+ int index = rate->index;
+ bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+ bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+ !vif->bss_conf.ps);
+
+ IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
+ cam, sta_ps_disabled);
+ /*
+ * allow tpc only if power management is enabled, or bt coex
+ * activity grade allows it and we are on 2.4 GHz.
+ */
+ if ((cam || sta_ps_disabled) &&
+ !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+ return false;
+
+ IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+ if (is_legacy(rate))
+ return index == IWL_RATE_54M_INDEX;
+ if (is_ht(rate))
+ return index == IWL_RATE_MCS_7_INDEX;
+ if (is_vht(rate))
+ return index == IWL_RATE_MCS_7_INDEX ||
+ index == IWL_RATE_MCS_8_INDEX ||
+ index == IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
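
Net effect of the gate above: power reduction is probed only where rate scaling has topped out, and not at all when power saving is off unless BT coex permits it on the current band:

    /*
     * legacy: 54M only;  HT: MCS7 only;  VHT: MCS7, MCS8 or MCS9.
     * Vifs with (cam || sta_ps_disabled) additionally need
     * iwl_mvm_bt_coex_is_tpc_allowed() to return true.
     */
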
+
+enum tpc_action {
+ TPC_ACTION_STAY,
+ TPC_ACTION_DECREASE,
+ TPC_ACTION_INCREASE,
+ TPC_ACTION_NO_RESTRICTION,
+};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+ s32 sr, int weak, int strong,
+ int current_tpt,
+ int weak_tpt, int strong_tpt)
+{
+ /* stay until we have valid tpt */
+ if (current_tpt == IWL_INVALID_VALUE) {
+ IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+ return TPC_ACTION_STAY;
+ }
+
+ /* Too many failures, increase txp */
+ if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+ IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+ return TPC_ACTION_NO_RESTRICTION;
+ }
+
+ /* try decreasing first if applicable */
+ if (weak != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ current_tpt >= strong_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "no weak txp measurement. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+
+ if (weak_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has better tpt. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+ }
+
+ /* next, increase if needed */
+ if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ strong_tpt != IWL_INVALID_VALUE &&
+ current_tpt < strong_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "higher txp has better tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+
+ if (weak_tpt < current_tpt &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ strong_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has worse tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+ }
+
+ IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+ return TPC_ACTION_STAY;
+}
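
The branches above amount to this decision table (thresholds from rs.h; success ratio scale 12800 == 100%):

    /*
     * no valid current tpt                            -> STAY
     * sr <= TPC_SR_FORCE_INCREASE (75%) or tpt == 0   -> NO_RESTRICTION
     * can weaken, weak tpt unmeasured and current
     *   >= strong tpt (or strong unmeasured)          -> DECREASE (probe)
     * can weaken, weak tpt beats current              -> DECREASE
     * sr < TPC_SR_NO_INCREASE (85%) and can strengthen:
     *   weak unmeasured, strong tpt > current         -> INCREASE
     *   weak tpt < current, strong tpt not worse      -> INCREASE
     * otherwise                                       -> STAY
     */
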
+
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct ieee80211_vif *vif = mvm_sta->vif;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum ieee80211_band band;
+ struct iwl_rate_scale_data *window;
+ struct rs_rate *rate = &tbl->rate;
+ enum tpc_action action;
+ s32 sr;
+ u8 cur = lq_sta->lq.reduced_tpc;
+ int current_tpt;
+ int weak, strong;
+ int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+ IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
+ lq_sta->dbg_fixed_txp_reduction);
+ lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+ return cur != lq_sta->dbg_fixed_txp_reduction;
+ }
+#endif
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!chanctx_conf))
+ band = IEEE80211_NUM_BANDS;
+ else
+ band = chanctx_conf->def.chan->band;
+ rcu_read_unlock();
+
+ if (!rs_tpc_allowed(mvm, vif, rate, band)) {
+ IWL_DEBUG_RATE(mvm,
+ "tpc is not allowed. remove txp restrictions\n");
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return cur != TPC_NO_REDUCTION;
+ }
+
+ rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+ /* Collect measured throughputs for current and adjacent rates */
+ window = tbl->tpc_win;
+ sr = window[cur].success_ratio;
+ current_tpt = window[cur].average_tpt;
+ if (weak != TPC_INVALID)
+ weak_tpt = window[weak].average_tpt;
+ if (strong != TPC_INVALID)
+ strong_tpt = window[strong].average_tpt;
+
+ IWL_DEBUG_RATE(mvm,
+ "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+ cur, current_tpt, sr, weak, strong,
+ weak_tpt, strong_tpt);
+
+ action = rs_get_tpc_action(mvm, sr, weak, strong,
+ current_tpt, weak_tpt, strong_tpt);
+
+ /* override actions if we are on the edge */
+ if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+ IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
+ action = TPC_ACTION_STAY;
+ } else if (strong == TPC_INVALID &&
+ (action == TPC_ACTION_INCREASE ||
+ action == TPC_ACTION_NO_RESTRICTION)) {
+ IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
+ action = TPC_ACTION_STAY;
+ }
+
+ switch (action) {
+ case TPC_ACTION_DECREASE:
+ lq_sta->lq.reduced_tpc = weak;
+ return true;
+ case TPC_ACTION_INCREASE:
+ lq_sta->lq.reduced_tpc = strong;
+ return true;
+ case TPC_ACTION_NO_RESTRICTION:
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return true;
+ case TPC_ACTION_STAY:
+ /* do nothing */
+ break;
+ }
+ return false;
+}
+
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -2019,6 +2185,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
break;
case RS_ACTION_STAY:
/* No change */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
+ update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+ break;
default:
break;
}
@@ -2271,10 +2440,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
if (i == IWL_RATE_9M_INDEX)
continue;
- /* Disable MCS9 as a workaround */
- if (i == IWL_RATE_MCS_9_INDEX)
- continue;
-
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2293,10 +2458,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
if (i == IWL_RATE_9M_INDEX)
continue;
- /* Disable MCS9 as a workaround */
- if (i == IWL_RATE_MCS_9_INDEX)
- continue;
-
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2478,6 +2639,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_agg = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
+ lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2815,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table_from_fixed(mvm, lq_cmd,
lq_sta->band,
lq_sta->dbg_fixed_rate);
+ lq_cmd->reduced_tpc = 0;
ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
RATE_MCS_ANT_POS;
} else
@@ -2783,7 +2946,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
size_t buf_size;
u32 parsed_rate;
-
mvm = lq_sta->drv;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
@@ -2856,6 +3018,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.agg_disable_start_th,
lq_sta->lq.agg_frame_cnt_limit);
+ desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
desc += sprintf(buff+desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3091,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek,
};
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char * const column_name[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+ [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+ [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+ [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+ [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+ [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+ [RS_COLUMN_MIMO2] = "MIMO2",
+ [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+ };
+
+ static const char * const rate_name[] = {
+ [IWL_RATE_1M_INDEX] = "1M",
+ [IWL_RATE_2M_INDEX] = "2M",
+ [IWL_RATE_5M_INDEX] = "5.5M",
+ [IWL_RATE_11M_INDEX] = "11M",
+ [IWL_RATE_6M_INDEX] = "6M|MCS0",
+ [IWL_RATE_9M_INDEX] = "9M",
+ [IWL_RATE_12M_INDEX] = "12M|MCS1",
+ [IWL_RATE_18M_INDEX] = "18M|MCS2",
+ [IWL_RATE_24M_INDEX] = "24M|MCS3",
+ [IWL_RATE_36M_INDEX] = "36M|MCS4",
+ [IWL_RATE_48M_INDEX] = "48M|MCS5",
+ [IWL_RATE_54M_INDEX] = "54M|MCS6",
+ [IWL_RATE_MCS_7_INDEX] = "MCS7",
+ [IWL_RATE_MCS_8_INDEX] = "MCS8",
+ [IWL_RATE_MCS_9_INDEX] = "MCS9",
+ };
+
+ char *buff, *pos, *endpos;
+ int col, rate;
+ ssize_t ret;
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct rs_rate_stats *stats;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos, "COLUMN,");
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+ pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+ pos += scnprintf(pos, endpos - pos, "\n");
+
+ for (col = 0; col < RS_COLUMN_COUNT; col++) {
+ pos += scnprintf(pos, endpos - pos,
+ "%s,", column_name[col]);
+
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+ stats = &(lq_sta->tx_stats[col][rate]);
+ pos += scnprintf(pos, endpos - pos,
+ "%llu/%llu,",
+ stats->success,
+ stats->total);
+ }
+ pos += scnprintf(pos, endpos - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+ return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+
+ return count;
+}
+
+static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+ .read = rs_sta_dbgfs_drv_tx_stats_read,
+ .write = rs_sta_dbgfs_drv_tx_stats_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
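
The read handler emits one CSV row per rs_tx_columns entry; an abbreviated, invented sample of the output (middle rates elided):

    COLUMN,1M,2M,5.5M,11M,6M|MCS0,...,MCS8,MCS9,
    LEGACY_ANT_A,0/0,0/0,0/0,0/0,12/15,...,0/0,0/0,
    SISO_ANT_A,0/0,0/0,0/0,0/0,0/0,...,88/91,102/120,

Each cell is success/total; writing anything to the file zeroes the table.
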
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -2937,9 +3188,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+ debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
+ lq_sta->rs_sta_dbgfs_reduced_txp_file =
+ debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->dbg_fixed_txp_reduction);
}
static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3204,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
}
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 0acfac96a56c..374a83d7db25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -158,6 +158,13 @@ enum {
#define RS_SR_FORCE_DECREASE 1920 /* 15% */
#define RS_SR_NO_DECREASE 10880 /* 85% */
+#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
+#define TPC_SR_NO_INCREASE 10880 /* 85% */
+#define TPC_TX_POWER_STEP 3
+#define TPC_MAX_REDUCTION 15
+#define TPC_NO_REDUCTION 0
+#define TPC_INVALID 0xff
+
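
Like the RS_SR_* values above them, the TPC_SR_* thresholds are on the window success-ratio scale where 12800 corresponds to 100%:

    /* 9600 / 12800 = 75%, 10880 / 12800 = 85%; compare
     * RS_SR_FORCE_DECREASE: 1920 / 12800 = 15%. */
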
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -266,9 +273,16 @@ enum rs_column {
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
RS_COLUMN_INVALID,
};
+/* Packet stats per rate */
+struct rs_rate_stats {
+ u64 success;
+ u64 total;
+};
+
/**
* struct iwl_scale_tbl_info -- tx params and success history for all rates
*
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
enum rs_column column;
const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+ /* per txpower-reduction history */
+ struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
};
enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
bool is_vht;
enum ieee80211_band band;
+ struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
unsigned long active_legacy_rate;
unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ struct dentry *rs_sta_dbgfs_reduced_txp_file;
u32 dbg_fixed_rate;
+ u8 dbg_fixed_txp_reduction;
#endif
struct iwl_mvm *drv;
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
+
+ /* tx power reduction for this sta */
+ int tpc_reduce;
};
/* Initialize station's rate scaling information after adding station */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 6061553a5e44..cf7276967acd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -60,7 +60,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "iwl-trans.h"
-
#include "mvm.h"
#include "fw-api.h"
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_ni(mvm->hw, skb);
-}
-
-static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
- struct iwl_rx_phy_info *phy_info,
- struct ieee80211_rx_status *rx_status)
-{
- int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- u32 agc_a, agc_b;
- u32 val;
-
- val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
- agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
- agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
-
- val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
- rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
- rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-
- /*
- * dBm = rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal.
- */
- rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
- rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
- max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
-
- IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
- rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
-
- rx_status->signal = max_rssi_dbm;
- rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
- rx_status->chain_signal[0] = rssi_a_dbm;
- rx_status->chain_signal[1] = rssi_b_dbm;
+ ieee80211_rx(mvm->hw, skb);
}
/*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
*/
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
- iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
- else
- iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
+ iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status.flag |= RX_FLAG_VHT;
rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status.vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status.rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c28de54c75d4..4b6c7d4bd199 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -306,7 +306,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.id = SCAN_REQUEST_CMD,
.len = { 0, },
.data = { mvm->scan_cmd, },
- .flags = CMD_SYNC,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
@@ -319,7 +318,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
- BUG_ON(mvm->scan_cmd == NULL);
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -514,7 +516,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ARRAY_SIZE(scan_abort_notif),
iwl_mvm_scan_abort_notif, NULL);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
/* mac80211's state will be cleaned in the nic_restart flow */
@@ -538,9 +540,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ IWL_DEBUG_SCAN(mvm,
+ "Scheduled scan completed, status %s EBS status %s:%d\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted");
+ "completed" : "aborted", scan_notif->ebs_status ==
+ IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+ scan_notif->ebs_status);
+
/* only call mac80211 completion if the stop was initiated by FW */
if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -548,6 +554,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
ieee80211_sched_scan_stopped(mvm->hw);
}
+ mvm->last_ebs_successful = !scan_notif->ebs_status;
+
return 0;
}
@@ -740,7 +748,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct iwl_scan_offload_cfg *scan_cfg;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_CONFIG_CMD,
- .flags = CMD_SYNC,
};
struct iwl_mvm_scan_params params = {};
@@ -798,7 +805,6 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct iwl_scan_offload_blacklist *blacklist;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
- .flags = CMD_SYNC,
.len[1] = sizeof(*profile_cfg),
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
@@ -884,7 +890,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
}
- return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+ if (mvm->last_ebs_successful &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+ scan_req.flags |=
+ cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
sizeof(scan_req), &scan_req);
}
@@ -893,7 +904,6 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
int ret;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_ABORT_CMD,
- .flags = CMD_SYNC,
};
u32 status;
@@ -922,7 +932,7 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
{
int ret;
struct iwl_notification_wait wait_scan_done;
@@ -960,5 +970,8 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
*/
mvm->scan_status = IWL_MVM_SCAN_NONE;
+ if (notify)
+ ieee80211_sched_scan_stopped(mvm->hw);
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 88809b2d1654..7edfd15efc9d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
};
- if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
- return 0;
-
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index f339ef884250..1fb01ea2e704 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,115 +66,6 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
- struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
-{
- memset(cmd_v5, 0, sizeof(*cmd_v5));
-
- cmd_v5->add_modify = cmd_v7->add_modify;
- cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v7->sta_id;
- cmd_v5->modify_mask = cmd_v7->modify_mask;
- cmd_v5->station_flags = cmd_v7->station_flags;
- cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v7->assoc_id;
- cmd_v5->beamform_flags = cmd_v7->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
-}
-
-static void
-iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
- struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
- u32 mac_id_n_color)
-{
- memset(sta_cmd, 0, sizeof(*sta_cmd));
-
- sta_cmd->sta_id = key_cmd->sta_id;
- sta_cmd->add_modify = STA_MODE_MODIFY;
- sta_cmd->modify_mask = STA_MODIFY_KEY;
- sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
-
- sta_cmd->key.key_offset = key_cmd->key_offset;
- sta_cmd->key.key_flags = key_cmd->key_flags;
- memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
- sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
- memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
- sizeof(sta_cmd->key.tkip_rx_ttak));
-}
-
-static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v7 *cmd,
- int *status)
-{
- struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
- cmd, status);
-
- iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
- &cmd_v5, status);
-}
-
-static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v7 *cmd)
-{
- struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
- sizeof(*cmd), cmd);
-
- iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
- &cmd_v5);
-}
-
-static int
-iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_key_cmd *cmd,
- u32 mac_id_n_color,
- int *status)
-{
- struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
- sizeof(*cmd), cmd, status);
-
- iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
- &sta_cmd, status);
-}
-
-static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
- u32 flags,
- struct iwl_mvm_add_sta_key_cmd *cmd,
- u32 mac_id_n_color)
-{
- struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
- sizeof(*cmd), cmd);
-
- iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
- &sta_cmd);
-}
-
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
enum nl80211_iftype iftype)
{
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+ &add_sta_cmd, &status);
if (ret)
return ret;
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -434,7 +327,7 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
return -EINVAL;
}
- ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
sizeof(rm_sta_cmd), &rm_sta_cmd);
if (ret) {
IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
sta_id);
continue;
}
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained);
}
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
- /*
- * Put a non-NULL since the fw station isn't removed.
- * It will be removed after the MAC will be set as
- * unassoc.
- */
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
- ERR_PTR(-EINVAL));
-
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
} else {
spin_unlock_bh(&mvm_sta->lock);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
return ret;
}
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
sta->sta_id = IWL_MVM_STATION_COUNT;
}
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v7 cmd;
+ struct iwl_mvm_add_sta_cmd cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
+ memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
u32 cmd_flags)
{
- __le16 key_flags;
struct iwl_mvm_add_sta_key_cmd cmd = {};
+ __le16 key_flags;
int ret, status;
u16 keyidx;
int i;
- u32 mac_id_n_color = mvm_sta->mac_id_n_color;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1166,13 +1053,12 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
- if (cmd_flags == CMD_SYNC)
- ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
- mac_id_n_color,
- &status);
+ if (cmd_flags & CMD_ASYNC)
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
+ sizeof(cmd), &cmd);
else
- ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
- mac_id_n_color);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1225,7 +1111,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
remove_key ? "removing" : "installing",
igtk_cmd.sta_id);
- return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
sizeof(igtk_cmd), &igtk_cmd);
}
@@ -1312,15 +1198,15 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- seq.tkip.iv32, p1k, CMD_SYNC);
+ seq.tkip.iv32, p1k, 0);
break;
case WLAN_CIPHER_SUITE_CCMP:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- 0, NULL, CMD_SYNC);
+ 0, NULL, 0);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
- sta_id, 0, NULL, CMD_SYNC);
+ sta_id, 0, NULL, 0);
}
if (ret)
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
- mvm_sta->mac_id_n_color,
- &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v7 cmd = {
+ struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
bool agg)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v7 cmd = {
+ struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
}
- ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 2ed84c421481..d98e8a2142b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
* This is basically (last acked packet++).
* @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @reduced_tpc: Reduced tx power. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @state: state of the BA agreement establishment / tear down.
* @txq_id: Tx queue used by the BA session
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
u16 next_reclaimed;
/* The rest is Tx AGG related */
u32 rate_n_flags;
+ u8 reduced_tpc;
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
@@ -284,8 +287,6 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
- * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
- * by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
* @next_status_eosp: the next reclaimed packet is a PS-Poll response and
* we need to signal the EOSP
@@ -306,7 +307,6 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
- bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 61331245ad93..80100f6cc12a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true;
}
-/* used to convert from time event API v2 to v1 */
-#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
- TE_V2_EVENT_SOCIOPATHIC)
-static inline u16 te_v2_get_notify(__le16 policy)
-{
- return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
-}
-
-static inline u16 te_v2_get_dep_policy(__le16 policy)
-{
- return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
- TE_V2_PLACEMENT_POS;
-}
-
-static inline u16 te_v2_get_absence(__le16 policy)
-{
- return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
-}
-
-static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
- struct iwl_time_event_cmd_v1 *cmd_v1)
-{
- cmd_v1->id_and_color = cmd_v2->id_and_color;
- cmd_v1->action = cmd_v2->action;
- cmd_v1->id = cmd_v2->id;
- cmd_v1->apply_time = cmd_v2->apply_time;
- cmd_v1->max_delay = cmd_v2->max_delay;
- cmd_v1->depends_on = cmd_v2->depends_on;
- cmd_v1->interval = cmd_v2->interval;
- cmd_v1->duration = cmd_v2->duration;
- if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
- cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
- else
- cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
- cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
- cmd_v1->interval_reciprocal = 0; /* unused */
-
- cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
- cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
- cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
-}
-
-static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
- const struct iwl_time_event_cmd_v2 *cmd)
-{
- struct iwl_time_event_cmd_v1 cmd_v1;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
- return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(*cmd), cmd);
-
- iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
- return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(cmd_v1), &cmd_v1);
-}
-
-
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
- struct iwl_time_event_cmd_v2 *te_cmd)
+ struct iwl_time_event_cmd *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);
- ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(*te_cmd), te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
lockdep_assert_held(&mvm->mutex);
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
u32 id, uid;
int ret;
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
- ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(time_cmd), &time_cmd);
if (WARN_ON(ret))
return;
}
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
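The v2-to-v1 conversion removed above existed to repack a little-endian policy word into separate fields for older firmware; with v1 support dropped, the command struct is sent as-is. A minimal standalone sketch of the endian-safe mask-and-shift extraction those helpers performed; the mask values are illustrative stand-ins, not the firmware's real TE_V2_* bit layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins, not the real TE_V2_* values. */
    #define POLICY_NOTIF_MSK   0x00ff
    #define POLICY_ABSENCE     0x8000
    #define POLICY_ABSENCE_POS 15

    /* Model of le16_to_cpu(): reassemble the value from its LE bytes,
     * correct for values read in from wire/firmware byte order. */
    static uint16_t le16_to_cpu_demo(uint16_t le)
    {
            const uint8_t *p = (const uint8_t *)&le;

            return (uint16_t)(p[0] | (p[1] << 8));
    }

    static uint16_t te_get_notify(uint16_t policy_le)
    {
            return le16_to_cpu_demo(policy_le) & POLICY_NOTIF_MSK;
    }

    static uint16_t te_get_absence(uint16_t policy_le)
    {
            return (le16_to_cpu_demo(policy_le) & POLICY_ABSENCE) >>
                    POLICY_ABSENCE_POS;
    }

    int main(void)
    {
            uint8_t raw[2] = { 0x42, 0x80 }; /* policy word as received (LE) */
            uint16_t policy;

            memcpy(&policy, raw, sizeof(policy));
            printf("notify=0x%02x absent=%u\n",
                   (unsigned)te_get_notify(policy),
                   (unsigned)te_get_absence(policy));
            return 0;
    }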
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 7a99fa361954..868561512783 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -409,7 +409,6 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.id = REPLY_THERMAL_MNG_BACKOFF,
.len = { sizeof(u32), },
.data = { &backoff, },
- .flags = CMD_SYNC,
};
backoff = max(backoff, mvm->thermal_throttle.min_backoff);
@@ -468,13 +467,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
if (params->support_tx_backoff) {
- tx_backoff = 0;
+ tx_backoff = tt->min_backoff;
for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
if (temperature < params->tx_backoff[i].temperature)
break;
- tx_backoff = params->tx_backoff[i].backoff;
+ tx_backoff = max(tt->min_backoff,
+ params->tx_backoff[i].backoff);
}
- if (tx_backoff != 0)
+ if (tx_backoff != tt->min_backoff)
throttle_enable = true;
if (tt->tx_backoff != tx_backoff)
iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@@ -484,7 +484,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
IWL_WARN(mvm,
"Due to high temperature thermal throttling initiated\n");
tt->throttle = true;
- } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
+ } else if (tt->throttle && !tt->dynamic_smps &&
+ tt->tx_backoff == tt->min_backoff &&
temperature <= params->tx_protection_exit) {
IWL_WARN(mvm,
"Temperature is back to normal thermal throttling stopped\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 879aeac46cc1..3846a6c41eb1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
- ieee80211_tx_status_ni(mvm->hw, skb);
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] =
+ (void *)(uintptr_t)tx_resp->reduced_tpc;
+
+ ieee80211_tx_status(mvm->hw, skb);
}
if (txq_id >= mvm->first_agg_queue) {
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
+ mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
}
rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
+ info->status.status_driver_data[0] =
+ (void *)(uintptr_t)tid_data->reduced_tpc;
}
}
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status_ni(mvm->hw, skb);
+ ieee80211_tx_status(mvm->hw, skb);
}
return 0;
@@ -951,7 +958,7 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
};
- u32 flags = sync ? CMD_SYNC : CMD_ASYNC;
+ u32 flags = sync ? 0 : CMD_ASYNC;
ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
sizeof(flush_cmd), &flush_cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 2180902266ae..aa9fc77e8413 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -64,6 +64,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
#include "mvm.h"
#include "fw-api-rs.h"
@@ -143,7 +144,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
"cmd flags %x", cmd->flags))
return -EINVAL;
- cmd->flags |= CMD_SYNC | CMD_WANT_SKB;
+ cmd->flags |= CMD_WANT_SKB;
ret = iwl_trans_send_cmd(mvm->trans, cmd);
if (ret == -ERFKILL) {
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ /* Do not change this output - scripts rely on it */
+
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -516,13 +519,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
{
const struct fw_img *img;
u32 ofs, sram_len;
void *sram;
- if (!mvm->ucode_loaded || mvm->fw_error_sram)
+ if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
return;
img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +542,48 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
mvm->fw_error_sram_len = sram_len;
}
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+{
+ int i, reg_val;
+ unsigned long flags;
+
+ if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+ return;
+
+ /* reading buffer size */
+ reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+ mvm->fw_error_rxf_len =
+ (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+
+ /* the register holds the value divided by 128 */
+ mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+
+ if (!mvm->fw_error_rxf_len)
+ return;
+
+ mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+ if (!mvm->fw_error_rxf) {
+ mvm->fw_error_rxf_len = 0;
+ return;
+ }
+
+ if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+ kfree(mvm->fw_error_rxf);
+ mvm->fw_error_rxf = NULL;
+ mvm->fw_error_rxf_len = 0;
+ return;
+ }
+
+ for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+ iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+ i * sizeof(u32));
+ mvm->fw_error_rxf[i] =
+ iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+ }
+ iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+#endif
+
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
@@ -553,7 +599,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = init ? CMD_SYNC : CMD_ASYNC,
+ .flags = init ? 0 : CMD_ASYNC,
.data = { lq, },
};
@@ -604,6 +650,39 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *result = _data;
+ int i;
+
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+ mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+ *result = false;
+ }
+}
+
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+{
+ bool result = true;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
+ return false;
+
+ if (!mvm->cfg->rx_with_siso_diversity)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_diversity_iter, &result);
+
+ return result;
+}
+
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool value)
{
@@ -623,7 +702,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_bt_coex_vif_change(mvm);
- return iwl_mvm_power_update_mac(mvm, vif);
+ return iwl_mvm_power_update_mac(mvm);
}
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
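iwl_mvm_rx_diversity_allowed() above starts from "allowed" and lets each active interface veto the result through the iterator callback. A standalone sketch of that assume-then-veto pattern, with simplified stand-ins for the vif and SMPS types:

    #include <stdbool.h>
    #include <stdio.h>

    enum smps_mode { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

    struct vif {
            enum smps_mode smps;
    };

    typedef void (*vif_iterator)(void *data, struct vif *vif);

    /* Stand-in for ieee80211_iterate_active_interfaces_atomic(). */
    static void iterate_vifs(struct vif *vifs, int n, vif_iterator it,
                             void *data)
    {
            int i;

            for (i = 0; i < n; i++)
                    it(data, &vifs[i]);
    }

    /* Any interface requesting SMPS vetoes RX diversity. */
    static void diversity_iter(void *data, struct vif *vif)
    {
            bool *result = data;

            if (vif->smps == SMPS_STATIC || vif->smps == SMPS_DYNAMIC)
                    *result = false;
    }

    int main(void)
    {
            struct vif vifs[] = { { SMPS_OFF }, { SMPS_DYNAMIC } };
            bool allowed = true; /* assume allowed until a vif objects */

            iterate_vifs(vifs, 2, diversity_iter, &allowed);
            printf("rx diversity %s\n", allowed ? "allowed" : "blocked");
            return 0;
    }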
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3d1d57f9f5bc..7091a18d5a72 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -417,7 +417,7 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
splx->package.count != 2 ||
splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
splx->package.elements[0].integer.value != 0) {
- IWL_ERR(trans, "Unsupported splx structure");
+ IWL_ERR(trans, "Unsupported splx structure\n");
return 0;
}
@@ -426,14 +426,14 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
limits->package.count < 2 ||
limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
- IWL_ERR(trans, "Invalid limits element");
+ IWL_ERR(trans, "Invalid limits element\n");
return 0;
}
domain_type = &limits->package.elements[0];
power_limit = &limits->package.elements[1];
if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
- IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
return 0;
}
@@ -450,26 +450,26 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
pxsx_handle = ACPI_HANDLE(&pdev->dev);
if (!pxsx_handle) {
IWL_DEBUG_INFO(trans,
- "Could not retrieve root port ACPI handle");
+ "Could not retrieve root port ACPI handle\n");
return;
}
/* Get the method's handle */
status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_INFO(trans, "SPL method not found");
+ IWL_DEBUG_INFO(trans, "SPL method not found\n");
return;
}
/* Call SPLC with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &splx);
if (ACPI_FAILURE(status)) {
- IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
return;
}
trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
- IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
trans->dflt_pwr_limit);
kfree(splx.pointer);
}
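splx_get_pwr_limit() above never dereferences a field until the package shape (element count and per-element type tags) has been checked. A standalone sketch of the same validate-before-dereference pattern on a simplified tagged object; the types below only mimic ACPI's union acpi_object:

    #include <stdint.h>
    #include <stdio.h>

    enum obj_type { OBJ_INTEGER, OBJ_PACKAGE };

    struct obj {
            enum obj_type type;
            union {
                    uint64_t integer;
                    struct {
                            int count;
                            struct obj *elements;
                    } package;
            };
    };

    /* Return the power limit, or 0 if the structure is malformed. */
    static uint64_t get_pwr_limit(const struct obj *splx)
    {
            const struct obj *limits;

            if (splx->type != OBJ_PACKAGE || splx->package.count != 2 ||
                splx->package.elements[0].type != OBJ_INTEGER)
                    return 0; /* unsupported splx structure */

            limits = &splx->package.elements[1];
            if (limits->type != OBJ_PACKAGE || limits->package.count < 2 ||
                limits->package.elements[1].type != OBJ_INTEGER)
                    return 0; /* invalid limits element */

            return limits->package.elements[1].integer;
    }

    int main(void)
    {
            struct obj inner[2] = {
                    { .type = OBJ_INTEGER, .integer = 1 },    /* domain */
                    { .type = OBJ_INTEGER, .integer = 1600 }, /* limit */
            };
            struct obj outer[2] = {
                    { .type = OBJ_INTEGER, .integer = 0 },
                    { .type = OBJ_PACKAGE, .package = { 2, inner } },
            };
            struct obj splx = {
                    .type = OBJ_PACKAGE, .package = { 2, outer }
            };

            printf("limit=%llu\n",
                   (unsigned long long)get_pwr_limit(&splx));
            return 0;
    }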
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 9091513ea738..6c22b23a2845 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -102,7 +102,7 @@ struct iwl_rxq {
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
- int need_update;
+ bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
+static inline int iwl_queue_inc_wrap(int index)
{
- return ++index & (n_bd - 1);
+ return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
+static inline int iwl_queue_dec_wrap(int index)
{
- return --index & (n_bd - 1);
+ return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
struct iwl_cmd_meta {
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
*
* Contains common data for Rx and Tx queues.
*
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
* there might be HW changes in the future). For the normal TX
* queues, n_window, which is the size of the software queue data
* is also 256; however, for the command queue, n_window is only
* 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
* iwl_txq) only have 32 entries, while the HW buffers (@tfds in
* the same struct) have 256.
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
* data is a window overlayed over the HW queue.
*/
struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
/* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
- u8 need_update;
+ bool need_update;
u8 active;
bool ampdu;
};
@@ -270,6 +267,9 @@ struct iwl_trans_pcie {
struct iwl_trans *trans;
struct iwl_drv *drv;
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
@@ -362,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -370,6 +370,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
/*****************************************************
* Error handling
******************************************************/
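With n_bd gone, the wrap helpers above hard-code TFD_QUEUE_SIZE_MAX and rely on it being a power of two, so a single AND replaces the modulo and the decrement stays well defined even when the index momentarily underflows to -1. A runnable sketch of that arithmetic:

    #include <stdio.h>

    #define TFD_QUEUE_SIZE_MAX 256 /* must be a power of two */

    static int queue_inc_wrap(int index)
    {
            return ++index & (TFD_QUEUE_SIZE_MAX - 1);
    }

    static int queue_dec_wrap(int index)
    {
            /* --index may reach -1; -1 & 0xff == 255, wrapping to the end */
            return --index & (TFD_QUEUE_SIZE_MAX - 1);
    }

    int main(void)
    {
            printf("inc(255) = %d\n", queue_inc_wrap(255)); /* 0 */
            printf("dec(0)   = %d\n", queue_dec_wrap(0));   /* 255 */
            return 0;
    }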
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fdfa3969cac9..a2698e5e062c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;
- spin_lock(&rxq->lock);
-
- if (rxq->need_update == 0)
- goto exit_unlock;
+ lockdep_assert_held(&rxq->lock);
/*
* explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
+ rxq->need_update = true;
+ return;
}
}
rxq->write_actual = round_down(rxq->write, 8);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- rxq->need_update = 0;
+}
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+ spin_lock(&rxq->lock);
+
+ if (!rxq->need_update)
+ goto exit_unlock;
+
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ rxq->need_update = false;
exit_unlock:
spin_unlock(&rxq->lock);
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock(&rxq->lock);
- rxq->need_update = 1;
+ iwl_pcie_rxq_inc_wr_ptr(trans);
spin_unlock(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except during initialization)

*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
- spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_rxq_restock(trans);
- spin_unlock(&trans_pcie->irq_lock);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock(&trans_pcie->irq_lock);
- rxq->need_update = 1;
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
return 0;
}
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock(&rxq->lock);
}
/*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
u32 count = 8;
int total_empty;
+restart:
+ spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_pcie_rx_replenish_now(trans);
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
+ goto restart;
}
}
}
/* Backtrack one entry */
rxq->read = i;
+ spin_unlock(&rxq->lock);
+
if (fill_rx)
- iwl_pcie_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);
+
+ if (trans_pcie->napi.poll)
+ napi_gro_flush(&trans_pcie->napi, false);
}
/*
@@ -844,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
trans_pcie->ict_index, read);
trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+ ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
isr_stats->wakeup++;
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_pcie_rx_handle(trans);
-
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans);
+ local_bh_enable();
}
/* This "Tx" DMA channel is used only for loading uCode */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 2365553f1ef7..788085bc65d7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -73,6 +73,7 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
#include "internal.h"
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -103,7 +104,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -454,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
+ int iter;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -462,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ for (iter = 0; iter < 10; iter++) {
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ do {
+ ret = iwl_pcie_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
- do {
- ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
+ msleep(25);
+ }
- usleep_range(200, 1000);
- t += 200;
- } while (t < 150000);
+ IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
return ret;
}
@@ -1053,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
+}
+
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -1079,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ * As this function may be called again in some corner cases don't
+ * do anything if NAPI was already initialized.
+ */
+ if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ init_dummy_netdev(&trans_pcie->napi_dev);
+ iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ &trans_pcie->napi_dev,
+ iwl_pcie_dummy_napi_poll, 64);
+ }
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1099,6 +1123,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);
+ if (trans_pcie->napi.poll)
+ netif_napi_del(&trans_pcie->napi);
+
kfree(trans);
}
@@ -1237,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
@@ -1250,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u8 wr_ptr;
+
if (cnt == trans_pcie->cmd_queue)
continue;
+ if (!test_bit(cnt, trans_pcie->queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
msleep(1);
+ }
if (q->read_ptr != q->write_ptr) {
IWL_ERR(trans,
@@ -1264,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
ret = -ETIMEDOUT;
break;
}
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
if (!ret)
@@ -1298,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
cnt, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
}
@@ -1630,6 +1676,61 @@ err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+ u32 cmdlen = 0;
+ int i;
+
+ for (i = 0; i < IWL_NUM_OF_TBS; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+ return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ void *buf, u32 buflen)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ u32 len;
+ int i, ptr;
+
+ if (!buf)
+ return sizeof(*data) +
+ cmdq->q.n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
+
+ len = 0;
+ data = buf;
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->q.write_ptr;
+ for (i = 0; i < cmdq->q.n_window; i++) {
+ u8 idx = get_cmd_index(&cmdq->q, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(ptr);
+ }
+ spin_unlock_bh(&cmdq->lock);
+
+ data->len = cpu_to_le32(len);
+ return sizeof(*data) + len;
+}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
struct dentry *dir)
@@ -1672,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.grab_nic_access = iwl_trans_pcie_grab_nic_access,
.release_nic_access = iwl_trans_pcie_release_nic_access,
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .dump_data = iwl_trans_pcie_dump_data,
+#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
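iwl_trans_pcie_dump_data() above follows the common two-pass convention: called with buf == NULL it only reports the worst-case size, and the caller then allocates and calls again to fill. A standalone sketch of a consumer of that convention; dump_data() here is a stand-in, not the driver's function:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* With buf == NULL, return the required size; otherwise fill buf
     * and return the number of bytes written. */
    static size_t dump_data(void *buf, size_t buflen)
    {
            static const char payload[] = "txcmd ring contents";

            if (!buf)
                    return sizeof(payload);
            if (buflen < sizeof(payload))
                    return 0;
            memcpy(buf, payload, sizeof(payload));
            return sizeof(payload);
    }

    int main(void)
    {
            size_t len = dump_data(NULL, 0); /* pass 1: size only */
            char *buf = malloc(len);

            if (!buf)
                    return 1;
            len = dump_data(buf, len);       /* pass 2: fill */
            printf("dumped %zu bytes: %s\n", len, buf);
            free(buf);
            return 0;
    }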
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3b0c72c10054..038940afbdc5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -70,20 +70,20 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* To avoid ambiguity between empty and completely full queues, there
- * should always be less than q->n_bd elements in the queue.
- * If q->n_window is smaller than q->n_bd, there is no need to reserve
- * any queue entries for this purpose.
+ * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+ * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * to reserve any queue entries for this purpose.
*/
- if (q->n_window < q->n_bd)
+ if (q->n_window < TFD_QUEUE_SIZE_MAX)
max = q->n_window;
else
- max = q->n_bd - 1;
+ max = TFD_QUEUE_SIZE_MAX - 1;
/*
- * q->n_bd is a power of 2, so the following is equivalent to modulo by
- * q->n_bd and is well defined for negative dividends.
+ * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+ * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
*/
- used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
+ used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
if (WARN_ON(used > max))
return 0;
@@ -94,17 +94,11 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
- q->n_bd = count;
q->n_window = slots_num;
q->id = id;
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
-
/* slots_num must be power-of-two size, otherwise
* get_cmd_index is broken. */
if (WARN_ON(!is_power_of_2(slots_num)))
@@ -197,17 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i, q->n_bd))
+ i = iwl_queue_inc_wrap(i))
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(trans);
}
/*
@@ -287,14 +281,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
- if (txq->need_update == 0)
- return;
+ lockdep_assert_held(&txq->lock);
/*
* explicitly wake up the NIC if:
@@ -317,6 +311,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
return;
}
}
@@ -327,8 +322,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
*/
IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = &trans_pcie->txq[i];
- txq->need_update = 0;
+ spin_lock_bh(&txq->lock);
+ if (trans_pcie->txq[i].need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ trans_pcie->txq[i].need_update = false;
+ }
+ spin_unlock_bh(&txq->lock);
+ }
}
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -343,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
return addr;
}
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
dma_addr_t addr, u16 len)
{
@@ -409,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
- /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
int rd_ptr = txq->q.read_ptr;
int idx = get_cmd_index(&txq->q, rd_ptr);
lockdep_assert_held(&txq->lock);
- /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
/* free SKB */
@@ -436,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
}
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- dma_addr_t addr, u16 len, u8 reset)
+ dma_addr_t addr, u16 len, bool reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
@@ -542,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
{
int ret;
- txq->need_update = 0;
+ txq->need_update = false;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
+ ret = iwl_queue_init(&txq->q, slots_num, txq_id);
if (ret)
return ret;
@@ -575,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
- if (!q->n_bd)
- return;
-
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, q->read_ptr);
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
}
txq->active = false;
spin_unlock_bh(&txq->lock);
@@ -620,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
}
/* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, sizeof(struct iwl_tfd) *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->q.dma_addr);
txq->q.dma_addr = 0;
+ txq->tfds = NULL;
dma_free_coherent(dev,
sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -680,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+ if (trans->cfg->base_params->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
@@ -931,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- /* n_bd is usually 256 => n_bd - 1 = 0xff */
- int tfd_num = ssn & (txq->q.n_bd - 1);
+ int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
struct iwl_queue *q = &txq->q;
int last_to_free;
@@ -956,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/* Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+ last_to_free = iwl_queue_dec_wrap(tfd_num);
if (!iwl_queue_used(q, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
+ __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
goto out;
}
@@ -971,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
for (;
q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
continue;
@@ -1010,25 +1015,26 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
lockdep_assert_held(&txq->lock);
- if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, q->n_bd,
+ __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
WARN_ON(!trans_pcie->cmd_in_flight);
trans_pcie->cmd_in_flight = false;
@@ -1309,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
cmd_pos = offsetof(struct iwl_device_cmd, payload);
copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- int copy = 0;
+ int copy;
if (!cmd->len[i])
continue;
- /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
- if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
- copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
-
- if (copy > cmd->len[i])
- copy = cmd->len[i];
- }
-
/* copy everything if not nocopy/dup */
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP)))
+ IWL_HCMD_DFL_DUP))) {
copy = cmd->len[i];
- if (copy) {
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
cmd_pos += copy;
copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+ * in total (for the scratchbuf handling), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, treat copy_size the proper way, we need it below */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
}
}
@@ -1345,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
iwl_pcie_txq_build_tfd(trans, txq,
iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
- scratch_size, 1);
+ scratch_size, true);
/* map first command fragment, if any remains */
if (copy_size > scratch_size) {
@@ -1361,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
- copy_size - scratch_size, 0);
+ copy_size - scratch_size, false);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1384,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
out_meta->flags = cmd->flags;
@@ -1392,8 +1409,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- txq->need_update = 1;
-
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
@@ -1405,9 +1420,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
- * returned.
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
*/
- if (!trans_pcie->cmd_in_flight) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_in_flight) {
trans_pcie->cmd_in_flight = true;
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1427,7 +1444,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1583,7 +1600,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(trans);
iwl_trans_fw_error(trans);
goto cancel;
@@ -1661,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
u16 len, tb1_len, tb2_len;
- u8 wait_write_ptr = 0;
+ bool wait_write_ptr;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
u16 wifi_seq;
@@ -1722,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
IWL_HCMD_SCRATCHBUF_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
- IWL_HCMD_SCRATCHBUF_SIZE, 1);
+ IWL_HCMD_SCRATCHBUF_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
@@ -1732,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
goto out_err;
- iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+ iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
/*
* Set up TFD's third entry to point directly to remainder
@@ -1748,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
&txq->tfds[q->write_ptr]);
goto out_err;
}
- iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
+ iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
}
/* Set up entry for this TFD in Tx byte-count array */
@@ -1762,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx_data(trans->dev, skb,
skb->data + hdr_len, tb2_len);
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1775,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
+ * and we will get a TX status notification eventually.
*/
if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
+ if (wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- } else {
+ else
iwl_stop_queue(trans, txq);
- }
}
spin_unlock(&txq->lock);
return 0;
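iwl_queue_space() above measures occupancy with the same masked subtraction the wrap helpers use, then keeps one slot unused so that read_ptr == write_ptr unambiguously means "empty". A runnable sketch of that arithmetic:

    #include <stdio.h>

    #define TFD_QUEUE_SIZE_MAX 256 /* power of two */

    /* Free slots, reserving one entry to tell "full" from "empty". */
    static int queue_space(int write_ptr, int read_ptr)
    {
            int max = TFD_QUEUE_SIZE_MAX - 1;
            /* masked subtraction is well defined even when write < read */
            int used = (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

            return max - used;
    }

    int main(void)
    {
            printf("%d\n", queue_space(10, 10)); /* empty: 255 free */
            printf("%d\n", queue_space(5, 250)); /* wrapped: 11 used, 244 free */
            return 0;
    }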
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 54e344aed6e0..47a998d8f99e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1006,9 +1006,8 @@ struct cmd_key_material {
} __packed;
static int lbs_set_key_material(struct lbs_private *priv,
- int key_type,
- int key_info,
- u8 *key, u16 key_len)
+ int key_type, int key_info,
+ const u8 *key, u16 key_len)
{
struct cmd_key_material cmd;
int ret;
@@ -1610,7 +1609,7 @@ static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
*/
static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct lbs_private *priv = wiphy_priv(wiphy);
s8 signal, noise;
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index ab966f08024a..407784aca627 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -90,7 +90,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
#ifdef DEBUG
-static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
+static inline void lbs_deb_hex(unsigned int grp, const char *prompt,
+ const u8 *buf, int len)
{
int i = 0;
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index c7366b07b568..e446fed7b345 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -71,8 +71,10 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
skb->ip_summed = CHECKSUM_NONE;
- if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
- return process_rxed_802_11_packet(priv, skb);
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ ret = process_rxed_802_11_packet(priv, skb);
+ goto done;
+ }
p_rx_pd = (struct rxpd *) skb->data;
p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
@@ -86,7 +88,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
dev->stats.rx_length_errors++;
- ret = 0;
+ ret = -EINVAL;
dev_kfree_skb(skb);
goto done;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d7a52f5a410..a312c653d116 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
/* Not implemented, queues only on kernel side */
}
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index c92f27aa71ed..706831df1fa2 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
memcpy((u8 *)vht_op +
sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_oper +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_oper,
le16_to_cpu(vht_op->header.len));
/* negotiate the channel width and central freq
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index d14ead8beca8..e1c2f67ae85e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
memcpy((u8 *) ht_info +
sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_oper +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_oper,
le16_to_cpu(ht_info->header.len));
if (!(sband->ht_cap.cap &
@@ -750,3 +749,45 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
return;
}
+
+u8 mwifiex_get_sec_chan_offset(int chan)
+{
+ u8 sec_offset;
+
+ switch (chan) {
+ case 36:
+ case 44:
+ case 52:
+ case 60:
+ case 100:
+ case 108:
+ case 116:
+ case 124:
+ case 132:
+ case 140:
+ case 149:
+ case 157:
+ sec_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ break;
+ case 40:
+ case 48:
+ case 56:
+ case 64:
+ case 104:
+ case 112:
+ case 120:
+ case 128:
+ case 136:
+ case 144:
+ case 153:
+ case 161:
+ sec_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ break;
+ case 165:
+ default:
+ sec_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ break;
+ }
+
+ return sec_offset;
+}
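The new helper above encodes the 5 GHz channelization rule: the lower channel of each 40 MHz pair takes its secondary channel above, the upper one below, and channel 165 stays 20 MHz-only. A standalone sketch of how such an offset feeds the HT parameter field; the SEC_* constants are illustrative stand-ins for the IEEE80211_HT_PARAM_CHA_SEC_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for IEEE80211_HT_PARAM_CHA_SEC_*. */
    #define SEC_NONE  0
    #define SEC_ABOVE 1
    #define SEC_BELOW 3

    static uint8_t get_sec_chan_offset(int chan)
    {
            switch (chan) {
            case 36: case 44: case 52: case 60:
            case 100: case 108: case 116: case 124:
            case 132: case 140: case 149: case 157:
                    return SEC_ABOVE; /* lower member of the 40 MHz pair */
            case 40: case 48: case 56: case 64:
            case 104: case 112: case 120: case 128:
            case 136: case 144: case 153: case 161:
                    return SEC_BELOW; /* upper member of the 40 MHz pair */
            default:
                    return SEC_NONE;  /* 165 and anything unknown: 20 MHz */
            }
    }

    int main(void)
    {
            uint8_t ht_param = 0;

            ht_param |= get_sec_chan_offset(36);
            printf("chan 36 -> %u, chan 165 -> %u\n",
                   (unsigned)ht_param, (unsigned)get_sec_chan_offset(165));
            return 0;
    }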
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 40b007a00f4b..0b73fa08f5d4 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -63,6 +63,7 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
int cmd_action,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
+u8 mwifiex_get_sec_chan_offset(int chan);
static inline u8
mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
@@ -199,7 +200,7 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
}
static inline u8
-mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, const u8 *ra)
{
struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
if (node)
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 63211707f939..5b32106182f8 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -100,6 +100,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
struct sk_buff *skb)
{
struct txpd *local_tx_pd;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
skb_push(skb, sizeof(*local_tx_pd));
@@ -118,6 +119,9 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
sizeof(*local_tx_pd));
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
if (local_tx_pd->tx_control == 0)
/* TxCtrl set by user or default */
local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
@@ -160,6 +164,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
int pad = 0, ret;
struct mwifiex_tx_param tx_param;
struct txpd *ptx_pd = NULL;
+ struct timeval tv;
int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
skb_src = skb_peek(&pra_list->skb_head);
@@ -182,8 +187,14 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->bss_type = tx_info_src->bss_type;
tx_info_aggr->bss_num = tx_info_src->bss_num;
+
+ if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
skb_aggr->priority = skb_src->priority;
+ do_gettimeofday(&tv);
+ skb_aggr->tstamp = timeval_to_ktime(tv);
+
do {
/* Check if AMSDU can accommodate this MSDU */
if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +247,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
skb_aggr, NULL);
} else {
- /*
- * Padding per MSDU will affect the length of next
- * packet and hence the exact length of next packet
- * is uncertain here.
- *
- * Also, aggregation of transmission buffer, while
- * downloading the data to the card, wont gain much
- * on the AMSDU packets as the AMSDU packets utilizes
- * the transmission buffer space to the maximum
- * (adapter->tx_buf_size).
- */
- tx_param.next_pkt_len = 0;
+ if (skb_src)
+ tx_param.next_pkt_len =
+ skb_src->len + sizeof(struct txpd);
+ else
+ tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb_aggr, &tx_param);
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b9242c3dca43..3b55ce5690a5 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -200,4 +200,11 @@ getlog
cat getlog
+fw_dump
+ This command is used to dump firmware memory into files.
+	A separate file will be created for each memory segment.
+ Usage:
+
+ cat fw_dump
+
===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 21ee27ab7b74..e95dec91a561 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -994,7 +994,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
*/
static int
mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -1270,7 +1270,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac)
+ const u8 *mac)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_sta_node *sta_node;
@@ -2629,7 +2629,7 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, u8 action_code, u8 dialog_token,
+ const u8 *peer, u8 action_code, u8 dialog_token,
u16 status_code, u32 peer_capability,
const u8 *extra_ies, size_t extra_ies_len)
{
@@ -2701,7 +2701,7 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
static int
mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, enum nl80211_tdls_operation action)
+ const u8 *peer, enum nl80211_tdls_operation action)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2748,9 +2748,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
}
static int
-mwifiex_cfg80211_add_station(struct wiphy *wiphy,
- struct net_device *dev,
- u8 *mac, struct station_parameters *params)
+mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_parameters *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2765,9 +2764,9 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy,
}
static int
-mwifiex_cfg80211_change_station(struct wiphy *wiphy,
- struct net_device *dev,
- u8 *mac, struct station_parameters *params)
+mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
{
int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1062c918a7bf..8dee6c86f4f1 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->cmd_wait_q.status = -ETIMEDOUT;
wake_up_interruptible(&adapter->cmd_wait_q.wait);
mwifiex_cancel_pending_ioctl(adapter);
- /* reset cmd_sent flag to unblock new commands */
- adapter->cmd_sent = false;
}
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index b8a49aad12fd..7b419bbcd544 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -257,6 +257,29 @@ free_and_exit:
}
/*
+ * Proc firmware dump read handler.
+ *
+ * This function is called when the 'fw_dump' file is opened for
+ * reading.
+ * This function dumps firmware memory in different files
+ * (e.g. DTCM, ITCM, SQRAM etc.) based on the segments for
+ * debugging.
+ */
+static ssize_t
+mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = file->private_data;
+
+ if (!priv->adapter->if_ops.fw_dump)
+ return -EIO;
+
+ priv->adapter->if_ops.fw_dump(priv->adapter);
+
+ return 0;
+}
+
+/*
* Proc getlog file read handler.
*
* This function is called when the 'getlog' file is opened for reading
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
MWIFIEX_DFS_FILE_READ_OPS(info);
MWIFIEX_DFS_FILE_READ_OPS(debug);
MWIFIEX_DFS_FILE_READ_OPS(getlog);
+MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
MWIFIEX_DFS_FILE_OPS(regrdwr);
MWIFIEX_DFS_FILE_OPS(rdeeprom);
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(getlog);
MWIFIEX_DFS_ADD_FILE(regrdwr);
MWIFIEX_DFS_ADD_FILE(rdeeprom);
+ MWIFIEX_DFS_ADD_FILE(fw_dump);
}
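Editor's note: the new fw_dump entry uses a common debugfs idiom in which the read handler only triggers a side effect and returns 0 (EOF), so reading the file kicks off the dump without transferring any data. A hedged, standalone sketch of that idiom (names are illustrative, not from the driver):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

static ssize_t example_trigger_read(struct file *file, char __user *ubuf,
				    size_t count, loff_t *ppos)
{
	/* Kick the side effect here; returning 0 signals EOF
	 * immediately, so no payload is copied to user space.
	 */
	return 0;
}

static const struct file_operations example_trigger_fops = {
	.read = example_trigger_read,
	.open = simple_open,
	.owner = THIS_MODULE,
};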
/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e7b3e16e5d34..38da6ff6f416 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -42,12 +42,12 @@
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
-#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 16
-#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 32
+#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
+#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32
#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16
-#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 32
-#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 48
+#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 64
+#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 64
#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48
#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index b485dc1ae5eb..3175dd04834b 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,6 +169,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -229,6 +230,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
+#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
/* httxcfg bitmap
* 0 reserved
@@ -403,7 +405,7 @@ enum P2P_MODES {
#define HS_CFG_CANCEL 0xffffffff
#define HS_CFG_COND_DEF 0x00000000
#define HS_CFG_GPIO_DEF 0xff
-#define HS_CFG_GAP_DEF 0
+#define HS_CFG_GAP_DEF 0xff
#define HS_CFG_COND_BROADCAST_DATA 0x00000001
#define HS_CFG_COND_UNICAST_DATA 0x00000002
#define HS_CFG_COND_MAC_EVENT 0x00000004
@@ -487,6 +489,7 @@ enum P2P_MODES {
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
+#define EVENT_TDLS_GENERIC_EVENT 0x00000052
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
@@ -519,6 +522,7 @@ enum P2P_MODES {
#define ACT_TDLS_DELETE 0x00
#define ACT_TDLS_CREATE 0x01
#define ACT_TDLS_CONFIG 0x02
+#define TDLS_EVENT_LINK_TEAR_DOWN 3
#define MWIFIEX_FW_V15 15
@@ -535,6 +539,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
+#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
struct txpd {
u8 bss_type;
@@ -577,7 +582,7 @@ struct rxpd {
* [Bit 7] Reserved
*/
u8 ht_info;
- u8 reserved;
+ u8 flags;
} __packed;
struct uap_txpd {
@@ -708,6 +713,13 @@ struct mwifiex_ie_types_vendor_param_set {
u8 ie[MWIFIEX_MAX_VSIE_LEN];
};
+#define MWIFIEX_TDLS_IDLE_TIMEOUT 60
+
+struct mwifiex_ie_types_tdls_idle_timeout {
+ struct mwifiex_ie_types_header header;
+ __le16 value;
+} __packed;
+
struct mwifiex_ie_types_rsn_param_set {
struct mwifiex_ie_types_header header;
u8 rsn_ie[1];
@@ -1745,6 +1757,15 @@ struct host_cmd_ds_802_11_subsc_evt {
__le16 events;
} __packed;
+struct mwifiex_tdls_generic_event {
+ __le16 type;
+ u8 peer_mac[ETH_ALEN];
+ union {
+ __le16 reason_code;
+ __le16 reserved;
+ } u;
+} __packed;
+
struct mwifiex_ie {
__le16 ie_index;
__le16 mgmt_subtype_mask;
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index ee494db54060..1b576722671d 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
u32 rx_ant;
};
-#define MWIFIEX_NUM_OF_CMD_BUFFER 20
+#define MWIFIEX_NUM_OF_CMD_BUFFER 50
#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
enum {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c771b3e9918..cbabc12fbda3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -521,7 +521,6 @@ done:
release_firmware(adapter->firmware);
adapter->firmware = NULL;
}
- complete(&adapter->fw_load);
if (init_failed)
mwifiex_free_adapter(adapter);
up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
{
int ret;
- init_completion(&adapter->fw_load);
ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
adapter->dev, GFP_KERNEL, adapter,
mwifiex_fw_dpc);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d53e1e8c9467..1398afa84064 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
+ void (*fw_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
};
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
spinlock_t queue_lock; /* lock for tx queues */
- struct completion fw_load;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
u8 scan_delay_cnt;
@@ -910,8 +910,6 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
struct sk_buff *skb);
int mwifiex_process_sta_event(struct mwifiex_private *);
int mwifiex_process_uap_event(struct mwifiex_private *);
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
@@ -1101,7 +1099,7 @@ mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
return 0;
/* Clear csa channel, if DFS channel move time has passed */
- if (jiffies > priv->csa_expire_time) {
+ if (time_after(jiffies, priv->csa_expire_time)) {
priv->csa_chan = 0;
priv->csa_expire_time = 0;
}
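Editor's note: the raw `jiffies > priv->csa_expire_time` comparison above breaks when jiffies wraps around; `time_after()` compares via signed subtraction and stays correct across the wrap. A minimal sketch:

#include <linux/types.h>
#include <linux/jiffies.h>

static bool example_deadline_passed(unsigned long deadline)
{
	/* Wrap-safe: expands to (long)(deadline - jiffies) < 0 */
	return time_after(jiffies, deadline);
}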
@@ -1220,26 +1218,26 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac);
void
mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
int ies_len, struct mwifiex_sta_node *node);
struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
size_t extra_ies_len);
-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, const u8 *extra_ies,
- size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *buf, int len);
-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a7e8b96b2d90..2cc9b6fca490 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -50,7 +50,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -1;
}
mapping.len = size;
- memcpy(skb->cb, &mapping, sizeof(mapping));
+ mwifiex_store_mapping(skb, &mapping);
return 0;
}
@@ -60,7 +60,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
struct pcie_service_card *card = adapter->card;
struct mwifiex_dma_mapping mapping;
- MWIFIEX_SKB_PACB(skb, &mapping);
+ mwifiex_get_mapping(skb, &mapping);
pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
}
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW load is in progress */
- wait_for_completion(&adapter->fw_load);
-
if (user_rmmod) {
#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
@@ -1074,6 +1071,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
* is mapped to PCI device memory. Tx ring pointers are advanced accordingly.
* The download-ready interrupt to FW is deferred if the Tx ring is not full
* and additional payload can be accommodated.
+ * The caller must ensure the tx_param parameter to this function is not NULL.
*/
static int
mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 7b3af3d29ded..45c5b3450cf5 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -29,9 +29,6 @@
#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
-#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
-#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
-#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
/* Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
/*
* In associated state we will reduce the number of channels scanned per
- * scan command to avoid any traffic delay/loss. This number is decided
- * based on total number of channels to be scanned due to constraints
- * of command buffers.
+ * scan command to 1 to avoid any traffic delay/loss.
*/
- if (priv->media_connected) {
- if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+ if (priv->media_connected)
*max_chan_per_scan = 1;
- else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
- *max_chan_per_scan = 2;
- else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
- *max_chan_per_scan = 3;
- else
- *max_chan_per_scan = 4;
- }
}
/*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
- bss_entry->bcn_bss_co_2040 = current_ptr +
- sizeof(struct ieee_types_header);
- bss_entry->bss_co_2040_offset = (u16) (current_ptr +
- sizeof(struct ieee_types_header) -
- bss_entry->beacon_buf);
+ bss_entry->bcn_bss_co_2040 = current_ptr;
+ bss_entry->bss_co_2040_offset =
+ (u16) (current_ptr - bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
- bss_entry->bcn_ext_cap = current_ptr +
- sizeof(struct ieee_types_header);
- bss_entry->ext_cap_offset = (u16) (current_ptr +
- sizeof(struct ieee_types_header) -
- bss_entry->beacon_buf);
+ bss_entry->bcn_ext_cap = current_ptr;
+ bss_entry->ext_cap_offset =
+ (u16) (current_ptr - bss_entry->beacon_buf);
break;
case WLAN_EID_OPMODE_NOTIF:
- bss_entry->oper_mode =
- (void *)(current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->oper_mode = (void *)current_ptr;
bss_entry->oper_mode_offset =
(u16)((u8 *)bss_entry->oper_mode -
bss_entry->beacon_buf);
@@ -1757,6 +1738,19 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
return 0;
}
+static void mwifiex_complete_scan(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ if (adapter->curr_cmd->wait_q_enabled) {
+ adapter->cmd_wait_q.status = 0;
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev, "complete internal scan\n");
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ }
+ }
+}
+
static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1770,16 +1764,9 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- /* Need to indicate IOCTL complete */
- if (adapter->curr_cmd->wait_q_enabled) {
- adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
- }
+ if (!adapter->ext_scan)
+ mwifiex_complete_scan(priv);
+
if (priv->report_scan_result)
priv->report_scan_result = false;
@@ -1984,6 +1971,9 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
{
dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+
+ mwifiex_complete_scan(priv);
+
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d206f04d4994..4ce3d7b33991 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->supports_sdio_new_mode = data->supports_sdio_new_mode;
card->has_control_mask = data->has_control_mask;
card->tx_buf_size = data->tx_buf_size;
+ card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
+ card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
}
sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW load is in progress */
- wait_for_completion(&adapter->fw_load);
-
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
@@ -1679,8 +1678,12 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (ret) {
if (type == MWIFIEX_TYPE_CMD)
adapter->cmd_sent = false;
- if (type == MWIFIEX_TYPE_DATA)
+ if (type == MWIFIEX_TYPE_DATA) {
adapter->data_sent = false;
+ /* restore curr_wr_port in error cases */
+ card->curr_wr_port = port;
+ card->mp_wr_bitmap |= (u32)(1 << card->curr_wr_port);
+ }
} else {
if (type == MWIFIEX_TYPE_DATA) {
if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
@@ -1842,8 +1845,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
card->mp_agg_pkt_limit, GFP_KERNEL);
ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
- SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
- SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
+ card->mp_tx_agg_buf_size,
+ card->mp_rx_agg_buf_size);
if (ret) {
dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
kfree(card->mp_regs);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index c71201b2e2a3..6eea30b43ed7 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -64,10 +64,8 @@
#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */
-
-/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (16384) /* 16K */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384)
+#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768)
/* Misc. Config Register : Auto Re-enable interrupts */
#define AUTO_RE_ENABLE_INT BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
bool supports_sdio_new_mode;
bool has_control_mask;
u16 tx_buf_size;
+ u32 mp_tx_agg_buf_size;
+ u32 mp_rx_agg_buf_size;
u32 mp_rd_bitmap;
u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
bool supports_sdio_new_mode;
bool has_control_mask;
u16 tx_buf_size;
+ u32 mp_tx_agg_buf_size;
+ u32 mp_rx_agg_buf_size;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.supports_sdio_new_mode = true,
.has_control_mask = false,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index e3cac1495cc7..88202ce0c139 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1546,6 +1546,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
struct mwifiex_ie_types_extcap *extcap;
struct mwifiex_ie_types_vhtcap *vht_capab;
struct mwifiex_ie_types_aid *aid;
+ struct mwifiex_ie_types_tdls_idle_timeout *timeout;
u8 *pos, qos_info;
u16 config_len = 0;
struct station_parameters *params = priv->sta_params;
@@ -1643,6 +1644,12 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
config_len += sizeof(struct mwifiex_ie_types_aid);
}
+ timeout = (void *)(pos + config_len);
+ timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
+ timeout->header.len = cpu_to_le16(sizeof(timeout->value));
+ timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);
+ config_len += sizeof(struct mwifiex_ie_types_tdls_idle_timeout);
+
break;
default:
dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
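Editor's note: the TDLS idle-timeout TLV above follows the driver's usual append idiom: cast the current write position, fill header.type and header.len (payload length only) as little-endian, then advance the running length by the full TLV size. A hedged sketch with a made-up helper name, assuming the fw.h definitions added in this patch:

static u16 example_append_idle_timeout(u8 *pos, u16 config_len)
{
	struct mwifiex_ie_types_tdls_idle_timeout *tlv =
		(void *)(pos + config_len);

	tlv->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
	tlv->header.len = cpu_to_le16(sizeof(tlv->value));	/* payload only */
	tlv->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);

	return config_len + sizeof(*tlv);	/* header + payload */
}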
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index bfebb0144df5..577f2979ed8f 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -865,14 +865,20 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
switch (action) {
case ACT_TDLS_DELETE:
- if (reason)
- dev_err(priv->adapter->dev,
- "TDLS link delete for %pM failed: reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
- else
+ if (reason) {
+ if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
+ dev_dbg(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ else
+ dev_err(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ } else {
dev_dbg(priv->adapter->dev,
- "TDLS link config for %pM successful\n",
+ "TDLS link delete for %pM successful\n",
cmd_tdls_oper->peer_mac);
+ }
break;
case ACT_TDLS_CREATE:
if (reason) {
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 368450cc56c7..f6395ef11a72 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -134,6 +134,46 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
netif_carrier_off(priv->netdev);
}
+static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ int ret = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_tdls_generic_event *tdls_evt =
+ (void *)event_skb->data + sizeof(adapter->event_cause);
+
+ /* the two reserved bytes are not mandatory in a TDLS event */
+ if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
+ sizeof(u16) - sizeof(adapter->event_cause))) {
+ dev_err(adapter->dev, "Invalid event length!\n");
+ return -1;
+ }
+
+ sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
+ if (!sta_ptr) {
+ dev_err(adapter->dev, "cannot get sta entry!\n");
+ return -1;
+ }
+
+ switch (le16_to_cpu(tdls_evt->type)) {
+ case TDLS_EVENT_LINK_TEAR_DOWN:
+ cfg80211_tdls_oper_request(priv->netdev,
+ tdls_evt->peer_mac,
+ NL80211_TDLS_TEARDOWN,
+ le16_to_cpu(tdls_evt->u.reason_code),
+ GFP_KERNEL);
+ ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
+ MWIFIEX_TDLS_DISABLE_LINK);
+ queue_work(adapter->workqueue, &adapter->main_work);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
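Editor's note: the length check above allows firmware to omit the trailing reserved field. Assuming the 4-byte event cause is a u32 (as the driver's arithmetic implies), the minimum acceptable payload can be restated as a small predicate; this is a hypothetical restatement, not driver code:

static bool example_tdls_event_len_ok(const struct sk_buff *event_skb)
{
	/* struct minus the optional reserved __le16, minus the 4-byte
	 * event cause that precedes the TDLS payload in the skb
	 */
	return event_skb->len >=
	       sizeof(struct mwifiex_tdls_generic_event) -
	       sizeof(__le16) - sizeof(u32);
}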
/*
* This function handles events generated by firmware.
*
@@ -459,6 +499,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
false);
break;
+ case EVENT_TDLS_GENERIC_EVENT:
+ ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
+ break;
+
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index ed26387eccf5..8b639d7fe6df 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -183,6 +183,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
u8 ta[ETH_ALEN];
u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
+ struct mwifiex_sta_node *sta_ptr;
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -213,14 +214,25 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
* If the packet is not a unicast packet, send it directly to the
* OS and don't pass it through RX reordering
*/
- if (!IS_11N_ENABLED(priv) ||
+ if ((!IS_11N_ENABLED(priv) &&
+ !(ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ !(local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET))) ||
!ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
mwifiex_process_rx_packet(priv, skb);
return ret;
}
- if (mwifiex_queuing_ra_based(priv)) {
+ if (mwifiex_queuing_ra_based(priv) ||
+ (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET)) {
memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+ if (local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET &&
+ local_rx_pd->priority < MAX_NUM_TID) {
+ sta_ptr = mwifiex_get_sta_entry(priv, ta);
+ if (sta_ptr)
+ sta_ptr->rx_seq[local_rx_pd->priority] =
+ le16_to_cpu(local_rx_pd->seq_num);
+ }
} else {
if (rx_pkt_type != PKT_TYPE_BAR)
priv->rx_seq[local_rx_pd->priority] = seq_num;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 1236a5de7bca..5fce7e78a36e 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -128,6 +128,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct txpd *local_tx_pd;
+ struct mwifiex_tx_param tx_param;
/* sizeof(struct txpd) + Interface specific header */
#define NULL_PACKET_HDR 64
u32 data_len = NULL_PACKET_HDR;
@@ -168,8 +169,9 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
skb, NULL);
} else {
skb_push(skb, INTF_HEADER_LEN);
+ tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
- skb, NULL);
+ skb, &tx_param);
}
switch (ret) {
case -EBUSY:
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 97662a1ba58c..e73034fbbde9 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -25,8 +25,8 @@
#define TDLS_RESP_FIX_LEN 8
#define TDLS_CONFIRM_FIX_LEN 6
-static void
-mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
+ const u8 *mac, u8 status)
{
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *tid_list;
@@ -84,7 +84,8 @@ mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
return;
}
-static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
+ const u8 *mac)
{
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *ra_list_head;
@@ -185,8 +186,50 @@ static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
+ u8 vht_enabled, struct sk_buff *skb)
+{
+ struct ieee80211_ht_operation *ht_oper;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_bssdescriptor *bss_desc =
+ &priv->curr_bss_params.bss_descriptor;
+ u8 *pos;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (unlikely(!sta_ptr)) {
+ dev_warn(priv->adapter->dev,
+ "TDLS peer station not found in list\n");
+ return -1;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
+ *pos++ = WLAN_EID_HT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_ht_operation);
+ ht_oper = (void *)pos;
+
+ ht_oper->primary_chan = bss_desc->channel;
+
+ /* follow AP's channel bandwidth */
+ if (ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
+ bss_desc->bcn_ht_cap &&
+ ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_oper->ht_param))
+ ht_oper->ht_param = bss_desc->bcn_ht_oper->ht_param;
+
+ if (vht_enabled) {
+ ht_oper->ht_param =
+ mwifiex_get_sec_chan_offset(bss_desc->channel);
+ ht_oper->ht_param |= BIT(2);
+ }
+
+ memcpy(&sta_ptr->tdls_cap.ht_oper, ht_oper,
+ sizeof(struct ieee80211_ht_operation));
+
+ return 0;
+}
+
static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
- u8 *mac, struct sk_buff *skb)
+ const u8 *mac, struct sk_buff *skb)
{
struct mwifiex_bssdescriptor *bss_desc;
struct ieee80211_vht_operation *vht_oper;
@@ -325,8 +368,9 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
}
static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, struct sk_buff *skb)
+ const u8 *peer, u8 action_code,
+ u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf;
int ret;
@@ -428,6 +472,17 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
dev_kfree_skb_any(skb);
return ret;
}
+ ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ } else {
+ ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
}
break;
@@ -453,7 +508,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
}
static void
-mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
+ const u8 *peer, const u8 *bssid)
{
struct ieee80211_tdls_lnkie *lnkid;
@@ -467,8 +523,8 @@ mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
memcpy(lnkid->resp_sta, peer, ETH_ALEN);
}
-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
+ u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
size_t extra_ies_len)
{
@@ -560,7 +616,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
}
static int
-mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, struct sk_buff *skb)
{
@@ -638,10 +695,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
return 0;
}
-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, const u8 *extra_ies,
- size_t extra_ies_len)
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
{
struct sk_buff *skb;
struct mwifiex_txinfo *tx_info;
@@ -848,7 +905,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
}
static int
-mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
@@ -869,7 +926,7 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
}
static int
-mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
@@ -896,7 +953,7 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
}
static int
-mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
@@ -925,7 +982,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
}
static int
-mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct ieee80211_mcs_info mcs;
@@ -982,7 +1039,7 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
return 0;
}
-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
{
switch (action) {
case MWIFIEX_TDLS_ENABLE_LINK:
@@ -997,7 +1054,7 @@ int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
return 0;
}
-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *sta_ptr;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 9be6544bdded..32643555dd2a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
switch (GET_RXSTBC(cap_info)) {
case MWIFIEX_RX_STBC1:
/* HT_CAP 1X1 mode */
- memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
break;
case MWIFIEX_RX_STBC12: /* fall through */
case MWIFIEX_RX_STBC123:
/* HT_CAP 2X2 mode */
- memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+ bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
default:
dev_warn(priv->adapter->dev,
"Unsupported RX-STBC, default to 2x2\n");
- memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+ bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
}
priv->ap_11n_enabled = 1;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edbe4aff00d8..a8ce8130cfae 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,9 +22,9 @@
#define USB_VERSION "1.0"
+static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
-static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
/* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
static void mwifiex_usb_disconnect(struct usb_interface *intf)
{
struct usb_card_rec *card = usb_get_intfdata(intf);
+ struct mwifiex_adapter *adapter;
- if (!card) {
- pr_err("%s: card is NULL\n", __func__);
+ if (!card || !card->adapter) {
+ pr_err("%s: card or card->adapter is NULL\n", __func__);
return;
}
- mwifiex_usb_free(card);
+ adapter = card->adapter;
+ if (!adapter->priv_num)
+ return;
- if (card->adapter) {
- struct mwifiex_adapter *adapter = card->adapter;
+ if (user_rmmod) {
+#ifdef CONFIG_PM
+ if (adapter->is_suspended)
+ mwifiex_usb_resume(intf);
+#endif
- if (!adapter->priv_num)
- return;
+ mwifiex_deauthenticate_all(adapter);
- dev_dbg(adapter->dev, "%s: removing card\n", __func__);
- mwifiex_remove_card(adapter, &add_remove_card_sem);
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
}
+ mwifiex_usb_free(card);
+
+ dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+ mwifiex_remove_card(adapter, &add_remove_card_sem);
+
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
kfree(card);
- usb_card = NULL;
return;
}
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
+ .soft_unbind = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
card->adapter = adapter;
adapter->dev = &card->udev->dev;
- usb_card = card;
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
- if (usb_card && usb_card->adapter) {
- struct mwifiex_adapter *adapter = usb_card->adapter;
-
- /* In case driver is removed when asynchronous FW downloading is
- * in progress
- */
- wait_for_completion(&adapter->fw_load);
-
-#ifdef CONFIG_PM
- if (adapter->is_suspended)
- mwifiex_usb_resume(usb_card->intf);
-#endif
-
- mwifiex_deauthenticate_all(adapter);
-
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
- }
+ /* set the flag as the user is removing this module */
+ user_rmmod = 1;
usb_deregister(&mwifiex_usb_driver);
}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index c3824e37f3f2..6da5abf52e61 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -259,7 +259,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
* NULL is returned if station entry is not found in associated STA list.
*/
struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
@@ -280,7 +280,7 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
* If received mac address is NULL, NULL is returned.
*/
struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
unsigned long flags;
@@ -332,7 +332,7 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
}
/* This function will delete a station entry from station list */
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
unsigned long flags;
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index ddae57021397..caadb3737b9e 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -20,32 +20,55 @@
#ifndef _MWIFIEX_UTIL_H_
#define _MWIFIEX_UTIL_H_
+struct mwifiex_dma_mapping {
+ dma_addr_t addr;
+ size_t len;
+};
+
+struct mwifiex_cb {
+ struct mwifiex_dma_mapping dma_mapping;
+ union {
+ struct mwifiex_rxinfo rx_info;
+ struct mwifiex_txinfo tx_info;
+ };
+};
+
static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
{
- return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t));
+ struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
+ return &cb->rx_info;
}
static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
{
- return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
+ struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+ return &cb->tx_info;
}
-struct mwifiex_dma_mapping {
- dma_addr_t addr;
- size_t len;
-};
+static inline void mwifiex_store_mapping(struct sk_buff *skb,
+ struct mwifiex_dma_mapping *mapping)
+{
+ struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+ memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
+}
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
- struct mwifiex_dma_mapping *mapping)
+static inline void mwifiex_get_mapping(struct sk_buff *skb,
+ struct mwifiex_dma_mapping *mapping)
{
- memcpy(mapping, skb->cb, sizeof(*mapping));
+ struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+ memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
}
static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
{
struct mwifiex_dma_mapping mapping;
- MWIFIEX_SKB_PACB(skb, &mapping);
+ mwifiex_get_mapping(skb, &mapping);
return mapping.addr;
}
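Editor's note: with the new mwifiex_cb overlay, the DMA mapping state and rx/tx info share skb->cb, and the BUILD_BUG_ON guards against the overlay outgrowing the 48-byte scratch area. A hedged usage sketch of the accessor pair (the call-site name is illustrative):

static void example_unmap(struct pci_dev *pdev, struct sk_buff *skb)
{
	struct mwifiex_dma_mapping m;

	/* Read back exactly what mwifiex_store_mapping() stashed in
	 * skb->cb when the buffer was mapped.
	 */
	mwifiex_get_mapping(skb, &m);
	pci_unmap_single(pdev, m.addr, m.len, PCI_DMA_FROMDEVICE);
}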
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 0a7cc742aed7..d3671d009f6c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -92,7 +92,7 @@ mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
* The function also initializes the list with the provided RA.
*/
static struct mwifiex_ra_list_tbl *
-mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
+mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -139,8 +139,7 @@ static u8 mwifiex_get_random_ba_threshold(void)
* This function allocates and adds a RA list for all TIDs
* with the given RA.
*/
-void
-mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
int i;
struct mwifiex_ra_list_tbl *ra_list;
@@ -164,6 +163,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
if (!mwifiex_queuing_ra_based(priv)) {
if (mwifiex_get_tdls_link_status(priv, ra) ==
TDLS_SETUP_COMPLETE) {
+ ra_list->tdls_link = true;
ra_list->is_11n_enabled =
mwifiex_tdls_peer_11n_enabled(priv, ra);
} else {
@@ -426,15 +426,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->tos_to_tid_inv[i];
}
- priv->aggr_prio_tbl[6].amsdu
- = priv->aggr_prio_tbl[6].ampdu_ap
- = priv->aggr_prio_tbl[6].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
-
- priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
- = priv->aggr_prio_tbl[7].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
-
mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);
@@ -575,7 +566,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
*/
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
- u8 *ra_addr)
+ const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -596,7 +587,8 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
* retrieved.
*/
struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+ const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -657,7 +649,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
dev_dbg(adapter->dev,
"TDLS setup packet for %pM. Don't block\n", ra);
- else
+ else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
tdls_status = mwifiex_get_tdls_link_status(priv, ra);
}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 83e42083ebff..eca56e371a57 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,7 +99,7 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct sk_buff *skb);
-void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra, int tid);
@@ -123,7 +123,8 @@ void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
const struct host_cmd_ds_command *resp);
struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+ const u8 *ra_addr);
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 49300d04efdf..e27e32851f1e 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -988,8 +988,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
* tsc must be NULL or up to 8 bytes
*/
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
- u8 *tsc, size_t tsc_len)
+ int set_tx, const u8 *key, const u8 *rsc,
+ size_t rsc_len, const u8 *tsc, size_t tsc_len)
{
struct {
__le16 idx;
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8f6831f4e328..466d1ede76f1 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -38,8 +38,8 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
int __orinoco_hw_setup_enc(struct orinoco_private *priv);
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
- u8 *tsc, size_t tsc_len);
+ int set_tx, const u8 *key, const u8 *rsc,
+ size_t rsc_len, const u8 *tsc, size_t tsc_len);
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 3ac71339d040..c90939ced0e4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1673,7 +1673,7 @@ static int ezusb_probe(struct usb_interface *interface,
firmware.code = fw_entry->data;
}
if (firmware.size && firmware.code) {
- if (ezusb_firmware_download(upriv, &firmware))
+ if (ezusb_firmware_download(upriv, &firmware) < 0)
goto error;
} else {
err("No firmware to download");
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index b7a867b50b94..6abdaf0aa052 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -52,9 +52,9 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
priv->keys[index].seq_len = seq_len;
if (key_len)
- memcpy(priv->keys[index].key, key, key_len);
+ memcpy((void *)priv->keys[index].key, key, key_len);
if (seq_len)
- memcpy(priv->keys[index].seq, seq, seq_len);
+ memcpy((void *)priv->keys[index].seq, seq, seq_len);
switch (alg) {
case ORINOCO_ALG_TKIP:
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index eede90b63f84..7be3a4839640 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
return total;
}
-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
+static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct p54_common *priv = dev->priv;
unsigned int total, i;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cbf0a589d32a..8330fa33e50b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
ray_release(link);
local = netdev_priv(dev);
- del_timer(&local->timer);
+ del_timer_sync(&local->timer);
if (link->priv) {
unregister_netdev(dev);
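Editor's note: the ray_cs change is the classic teardown fix: `del_timer()` can return while the handler is still running on another CPU, whereas `del_timer_sync()` also waits for a running handler to finish before the code frees anything it touches. Sketch:

#include <linux/timer.h>

static void example_safe_teardown(struct timer_list *t)
{
	/* Waits for a concurrently executing handler, so nothing the
	 * handler dereferences can be freed out from under it.
	 */
	del_timer_sync(t);
}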
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 39d22a154341..d2a9a08210be 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -517,7 +517,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool unicast, bool multicast);
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo);
+ const u8 *mac, struct station_info *sinfo);
static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo);
@@ -2490,7 +2490,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
}
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 84164747ace0..54aaeb09debf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -656,6 +656,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
common->vif_info[ii].seq_start = seq_no;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ status = 0;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1b28cda6ca88..2eefbf159bc0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
{
if (status) {
rsi_hal_send_sta_notify_frame(common,
- NL80211_IFTYPE_STATION,
+ RSI_IFTYPE_STATION,
STA_CONNECTED,
bssid,
qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
rsi_send_auto_rate_request(common);
} else {
rsi_hal_send_sta_notify_frame(common,
- NL80211_IFTYPE_STATION,
+ RSI_IFTYPE_STATION,
STA_DISCONNECTED,
bssid,
qos_enable,
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 2e39d38d6a9e..46e7af446f01 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -285,7 +285,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
if (err) {
rsi_dbg(ERR_ZONE, "%s: CCCR speed reg read failed: %d\n",
__func__, err);
- card->state &= ~MMC_STATE_HIGHSPEED;
} else {
err = rsi_cmd52writebyte(card,
SDIO_CCCR_SPEED,
@@ -296,14 +295,13 @@ static void rsi_reset_card(struct sdio_func *pfunction)
__func__, err);
return;
}
- mmc_card_set_highspeed(card);
host->ios.timing = MMC_TIMING_SD_HS;
host->ops->set_ios(host, &host->ios);
}
}
/* Set clock */
- if (mmc_card_highspeed(card))
+ if (mmc_card_hs(card))
clock = 50000000;
else
clock = card->cis.max_dtr;
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index f2f70784d4ad..d3fbe33d2324 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -63,7 +63,7 @@ static inline int rsi_create_kthread(struct rsi_common *common,
u8 *name)
{
init_completion(&thread->completion);
- thread->task = kthread_run(func_ptr, common, name);
+ thread->task = kthread_run(func_ptr, common, "%s", name);
if (IS_ERR(thread->task))
return (int)PTR_ERR(thread->task);
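Editor's note: the rsi change fixes a format-string bug: passing a caller-supplied name directly as kthread_run()'s namefmt lets any '%' sequence in it be interpreted. A sketch of the safe form:

#include <linux/kthread.h>

static struct task_struct *example_spawn(int (*fn)(void *), void *data,
					 const char *name)
{
	/* "%s" pins the format; name is treated purely as data */
	return kthread_run(fn, data, "%s", name);
}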
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ac67c4ad63c2..225215a3b8bb 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -73,6 +73,7 @@
#define RX_BA_INDICATION 1
#define RSI_TBL_SZ 40
#define MAX_RETRIES 8
+#define RSI_IFTYPE_STATION 0
#define STD_RATE_MCS7 0x07
#define STD_RATE_MCS6 0x06
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2f1cd929c6f6..a511cccc9f01 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1681,8 +1681,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Detect if this device has a hardware-controlled radio.
*/
- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
+ /*
+ * On this device, RFKILL initialized during probe does not work.
+ */
+ __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
+ }
/*
* Check if the BBP tuning should be enabled.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 41d4a8167dc3..c17fcf272728 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
entry->skb->len + padding_len);
/*
- * Enable beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
/*
* Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
void rt2800_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
/*
- * Enabled beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
}
EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a49c3d73ea2c..e11dab2216c6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -229,6 +229,27 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
/*
* Firmware functions
*/
+static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
+{
+ __le32 reg;
+ u32 fw_mode;
+
+ /* cannot use rt2x00usb_register_read here as it uses a different
+ * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
+ * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
+ * returned value would be invalid.
+ */
+ rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
+ USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
+ &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
+ fw_mode = le32_to_cpu(reg);
+
+ if ((fw_mode & 0x00000003) == 2)
+ return 1;
+
+ return 0;
+}
+
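Editor's note: per the new detect helper, the low two bits of the DEVICE_MODE answer encode the firmware mode, and the value 2 means the NIC is in AutoRun mode, so the host skips the firmware multiwrite. A trivial restatement of the decode:

static bool example_is_autorun(__le32 reg)
{
	/* low two bits select the firmware mode; 2 = AutoRun */
	return (le32_to_cpu(reg) & 0x00000003) == 2;
}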
static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
return FIRMWARE_RT2870;
@@ -257,8 +278,13 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Write firmware to device.
*/
- rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
- data + offset, length);
+ if (rt2800usb_autorun_detect(rt2x00dev)) {
+ rt2x00_info(rt2x00dev,
+ "Firmware loading not required - NIC in AutoRun mode\n");
+ } else {
+ rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+ data + offset, length);
+ }
rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -735,11 +761,18 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
/*
* Device probe functions.
*/
+static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
+{
+ if (rt2800usb_autorun_detect(rt2x00dev))
+ return 1;
+ return rt2800_efuse_detect(rt2x00dev);
+}
+
static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (rt2800_efuse_detect(rt2x00dev))
+ if (rt2800usb_efuse_detect(rt2x00dev))
retval = rt2800_read_eeprom_efuse(rt2x00dev);
else
retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e3b885d8f7db..d13f25cd70d5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -693,6 +693,7 @@ enum rt2x00_capability_flags {
REQUIRE_SW_SEQNO,
REQUIRE_HT_TX_DESC,
REQUIRE_PS_AUTOWAKE,
+ REQUIRE_DELAYED_RFKILL,
/*
* Capabilities
@@ -1448,7 +1449,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 2bde6729f5e6..4fa43a2eeb73 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1126,9 +1126,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
return;
/*
- * Unregister extra components.
+ * Stop rfkill polling.
*/
- rt2x00rfkill_unregister(rt2x00dev);
+ if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ rt2x00rfkill_unregister(rt2x00dev);
/*
* Allow the HW to uninitialize.
@@ -1166,6 +1167,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
+ /*
+ * Start rfkill polling.
+ */
+ if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ rt2x00rfkill_register(rt2x00dev);
+
return 0;
}
@@ -1375,7 +1382,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
rt2x00link_register(rt2x00dev);
rt2x00leds_register(rt2x00dev);
rt2x00debug_register(rt2x00dev);
- rt2x00rfkill_register(rt2x00dev);
+
+ /*
+ * Start rfkill polling.
+ */
+ if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ rt2x00rfkill_register(rt2x00dev);
return 0;
@@ -1391,6 +1403,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
/*
+ * Stop rfkill polling.
+ */
+ if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+ rt2x00rfkill_unregister(rt2x00dev);
+
+ /*
* Disable radio.
*/
rt2x00lib_disable_radio(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index a87ee9b6585a..004dff9b962d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -487,6 +487,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
crypto.cipher = rt2x00crypto_key_to_cipher(key);
if (crypto.cipher == CIPHER_NONE)
return -EOPNOTSUPP;
+ if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
+ return -EOPNOTSUPP;
crypto.cmd = cmd;
@@ -749,7 +751,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 10572452cc21..86c43d112a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -68,6 +68,12 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
}
}
+ /* If the port is powered down, we get a -EPROTO error, and this
+ * leads to an endless loop. So just say that the device is gone.
+ */
+ if (status == -EPROTO)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+
rt2x00_err(rt2x00dev,
"Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
request, offset, status);
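
A port that has been powered down answers every vendor request with -EPROTO, so without this check the caller's retry loop would spin forever on a device that is effectively gone. The pattern, sketched with a hypothetical single-attempt helper:

	/* Sketch (rt2x00usb_vendor_req_once() is a hypothetical stand-in
	 * for one usb_control_msg() attempt).
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		status = rt2x00usb_vendor_req_once(rt2x00dev);
		if (status >= 0)
			break;
		if (status == -EPROTO) {
			/* Port powered down: mark the device gone. */
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
			break;
		}
	}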
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index e7bcf62347d5..831b65f93feb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -93,6 +93,7 @@ enum rt2x00usb_mode_offset {
USB_MODE_SLEEP = 7, /* RT73USB */
USB_MODE_FIRMWARE = 8, /* RT73USB */
USB_MODE_WAKEUP = 9, /* RT73USB */
+ USB_MODE_AUTORUN = 17, /* RT2800USB */
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 24402984ee57..9048a9cbe52c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
static void rt61pci_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
HW_BEACON_OFFSET(entry->entry_idx), 0);
/*
- * Enable beaconing again.
+ * Restore global beaconing state.
*/
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
+ rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
}
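
Both beacon-clearing fixes (here and in rt73usb below) follow the same save/restore discipline: keep the original TXRX_CSR9 value and write it back verbatim, instead of unconditionally re-enabling beacon generation that may have been globally disabled. Condensed sketch of the pattern:

	/* Sketch: pause beaconing, wipe the slot, then restore the saved
	 * register image rather than forcing BEACON_GEN back to 1.
	 */
	rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
	reg = orig_reg;
	rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
	rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
	/* ... clear the beacon slot ... */
	rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);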
/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a140170b1eb3..95724ff9c726 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
/*
- * Enable beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
}
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index 08b056db4a3b..21005bd8b43c 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,5 +1,5 @@
-rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
+rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
-obj-$(CONFIG_RTL8180) += rtl8180.o
+obj-$(CONFIG_RTL8180) += rtl818x_pci.o
ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 98d8256f0377..2c1c02bafa10 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -284,6 +284,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.band = dev->conf.chandef.chan->band;
rx_status.mactime = tsft;
rx_status.flag |= RX_FLAG_MACTIME_START;
+ if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -461,18 +463,23 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
RTL818X_TX_DESC_FLAG_NO_ENC;
rc_flags = info->control.rates[0].flags;
+
+ /* HW will perform RTS-CTS when only the RTS flag is set.
+ * HW will perform CTS-to-self when both RTS and CTS flags are set.
+ * RTS rate and RTS duration will be used also for CTS-to-self.
+ */
if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+ rts_duration = ieee80211_rts_duration(dev, priv->vif,
+ skb->len, info);
} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
+ tx_flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+ rts_duration = ieee80211_ctstoself_duration(dev, priv->vif,
+ skb->len, info);
}
- if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
- rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
- info);
-
if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
unsigned int remainder;
@@ -683,9 +690,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
- rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
- IMR_TBDER | IMR_THPDER |
- IMR_THPDER | IMR_THPDOK |
+ rtl818x_iowrite32(priv, &priv->map->IMR,
+ IMR_TBDER | IMR_TBDOK |
IMR_TVODER | IMR_TVODOK |
IMR_TVIDER | IMR_TVIDOK |
IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +917,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
reg32 &= 0x00ffff00;
reg32 |= 0xb8000054;
rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
- }
+ } else
+ /* stop unused queues (no DMA alloc) */
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1<<1) | (1<<2));
priv->rf->init(dev);
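
On the RTL818x descriptors the protection mode is encoded combinatorially: RTS alone selects RTS/CTS, RTS together with CTS selects CTS-to-self, and the RTS rate and duration fields serve both modes. The selection logic common to the rtl8180 and rtl8187 hunks, condensed:

	/* Sketch of the shared protection-flag logic. */
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
		rts_duration = ieee80211_rts_duration(dev, priv->vif,
						      skb->len, info);
	} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		/* RTS + CTS together means CTS-to-self on this hardware. */
		tx_flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
		rts_duration = ieee80211_ctstoself_duration(dev, priv->vif,
							    skb->len, info);
	}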
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 0ca17cda48fa..629ad8cfa17b 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -253,14 +253,21 @@ static void rtl8187_tx(struct ieee80211_hw *dev,
flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
if (ieee80211_has_morefrags(tx_hdr->frame_control))
flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
+
+ /* HW will perform RTS-CTS when only the RTS flag is set.
+ * HW will perform CTS-to-self when both RTS and CTS flags are set.
+ * RTS rate and RTS duration will be used also for CTS-to-self.
+ */
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= RTL818X_TX_DESC_FLAG_RTS;
flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
rts_dur = ieee80211_rts_duration(dev, priv->vif,
skb->len, info);
} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- flags |= RTL818X_TX_DESC_FLAG_CTS;
+ flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+ rts_dur = ieee80211_ctstoself_duration(dev, priv->vif,
+ skb->len, info);
}
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -381,6 +388,8 @@ static void rtl8187_rx_cb(struct urb *urb)
rx_status.freq = dev->conf.chandef.chan->center_freq;
rx_status.band = dev->conf.chandef.chan->band;
rx_status.flag |= RX_FLAG_MACTIME_START;
+ if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 45ea4e1c4abe..7abef95d278b 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -334,9 +334,9 @@ struct rtl818x_csr {
* I don't like to introduce a ton of "reserved"..
* They are for RTL8187SE
*/
-#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr)
-#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1))
-#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2))
+#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr))
+#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1))
+#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2))
#define FEMR_SE REG_ADDR2(0x1D4)
#define ARFR REG_ADDR2(0x1E0)
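
Parenthesizing the macro argument matters as soon as a caller passes an expression, because >> binds tighter than | and looser than +. A toy example (base and the constants are hypothetical):

	#define OLD_ADDR2(addr) (base + (addr >> 1))	/* unparenthesized */
	#define NEW_ADDR2(addr) (base + ((addr) >> 1))	/* fixed */

	/* With an OR-ed argument:
	 *   OLD_ADDR2(0x10 | 0x4) -> base + (0x10 | (0x4 >> 1)) == base + 0x12
	 *   NEW_ADDR2(0x10 | 0x4) -> base + ((0x10 | 0x4) >> 1) == base + 0x0a
	 */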
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 4ec424f26672..b1ed6d0796f6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
* before switch channel or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 94cd9df98381..b14cf5a10f44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -2515,23 +2515,3 @@ void rtl88ee_suspend(struct ieee80211_hw *hw)
void rtl88ee_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config = 0x%08X, write_into_reg =%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
index b4460a41bd01..1850fde881b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
@@ -61,8 +61,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
void rtl88ee_suspend(struct ieee80211_hw *hw);
void rtl88ee_resume(struct ieee80211_hw *hw);
-void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 1b4101bf9974..842d69349a37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,7 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
rtl8188ee_bt_reg_init(hw);
- rtlpci->msi_support = true;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
@@ -255,7 +255,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
.enable_hw_sec = rtl88ee_enable_hw_security_config,
.set_key = rtl88ee_set_key,
.init_sw_leds = rtl88ee_init_sw_leds,
- .allow_all_destaddr = rtl88ee_allow_all_destaddr,
.get_bbreg = rtl88e_phy_query_bb_reg,
.set_bbreg = rtl88e_phy_set_bb_reg,
.get_rfreg = rtl88e_phy_query_rf_reg,
@@ -267,6 +266,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .msi_support = false,
.debug = DBG_EMERG,
};
@@ -383,10 +383,12 @@ module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
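
Making msi_support a module parameter (default off) turns the previously unconditional rtlpci->msi_support = true into an opt-in, so machines whose interrupt routing misbehaves with MSI keep using legacy interrupts; rtl8723be below gets the same treatment. With the parameter in place, MSI can be requested at load time, e.g. modprobe rtl8188ee msi=1 (assuming the in-tree module name).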
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 55adf043aef7..cdecb0fd4d8e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -2423,24 +2423,3 @@ void rtl92ce_suspend(struct ieee80211_hw *hw)
void rtl92ce_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) {/* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- } else {/* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
- }
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 2d063b0c7760..5533070f266c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -76,7 +76,5 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
void rtl92ce_suspend(struct ieee80211_hw *hw);
void rtl92ce_resume(struct ieee80211_hw *hw);
-void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b790320d2030..12f21f4073e8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -229,7 +229,6 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.enable_hw_sec = rtl92ce_enable_hw_security_config,
.set_key = rtl92ce_set_key,
.init_sw_leds = rtl92ce_init_sw_leds,
- .allow_all_destaddr = rtl92ce_allow_all_destaddr,
.get_bbreg = rtl92c_phy_query_bb_reg,
.set_bbreg = rtl92c_phy_set_bb_reg,
.set_rfreg = rtl92ce_phy_set_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 07cb06da6729..a903c2671b4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -511,7 +511,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
pr_info("MAC auto ON okay!\n");
break;
}
- if (pollingCount++ > 100) {
+ if (pollingCount++ > 1000) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
"Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
return -ENODEV;
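
Raising the iteration limit from 100 to 1000 gives slower chips more time to report the MAC auto-on state before probe fails with -ENODEV. The surrounding code is the usual bounded-poll idiom, roughly:

	/* Sketch (mac_auto_on() is a hypothetical stand-in for the
	 * actual APFM_ONMAC register check).
	 */
	int polling_count = 0;

	while (!mac_auto_on(rtlpriv)) {
		if (polling_count++ > 1000)	/* was 100: too short */
			return -ENODEV;
	}
	pr_info("MAC auto ON okay!\n");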
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c61311084d7e..361435f8608a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -395,9 +395,6 @@ static struct usb_driver rtl8192cu_driver = {
/* .resume = rtl_usb_resume, */
/* .reset_resume = rtl8192c_resume, */
#endif /* CONFIG_PM */
-#ifdef CONFIG_AUTOSUSPEND
- .supports_autosuspend = 1,
-#endif
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 9098558d916d..1c7101bcd790 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -2544,23 +2544,3 @@ void rtl92se_resume(struct ieee80211_hw *hw)
pci_write_config_dword(rtlpci->pdev, 0x40,
val & 0xffff00ff);
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index da48aa8cbe6f..4cacee10f31e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -74,7 +74,5 @@ void rtl92se_set_key(struct ieee80211_hw *hw,
u8 enc_algo, bool is_wepkey, bool clear_all);
void rtl92se_suspend(struct ieee80211_hw *hw);
void rtl92se_resume(struct ieee80211_hw *hw);
-void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 2e8e6f8d2d51..1bff2a0f7600 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -290,7 +290,6 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
.enable_hw_sec = rtl92se_enable_hw_security_config,
.set_key = rtl92se_set_key,
.init_sw_leds = rtl92se_init_sw_leds,
- .allow_all_destaddr = rtl92se_allow_all_destaddr,
.get_bbreg = rtl92s_phy_query_bb_reg,
.set_bbreg = rtl92s_phy_set_bb_reg,
.get_rfreg = rtl92s_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 48fee1be78c2..5b4a714f3c8c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -32,7 +32,6 @@
#include "dm.h"
#include "fw.h"
#include "../rtl8723com/fw_common.h"
-#include "../rtl8723com/fw_common.h"
#include "phy.h"
#include "reg.h"
#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 65c9e80e1f78..87f69166a7ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -2383,24 +2383,3 @@ void rtl8723ae_suspend(struct ieee80211_hw *hw)
void rtl8723ae_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
index 6fa24f79b1d7..d3bc39fb27a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
@@ -67,7 +67,5 @@ void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
void rtl8723ae_suspend(struct ieee80211_hw *hw);
void rtl8723ae_resume(struct ieee80211_hw *hw);
-void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 1087a3bd07fa..73cba1eec8cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -238,7 +238,6 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.enable_hw_sec = rtl8723ae_enable_hw_security_config,
.set_key = rtl8723ae_set_key,
.init_sw_leds = rtl8723ae_init_sw_leds,
- .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
.get_bbreg = rtl8723_phy_query_bb_reg,
.set_bbreg = rtl8723_phy_set_bb_reg,
.get_rfreg = rtl8723ae_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index 0fdf0909321f..3d555495b453 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -2501,23 +2501,3 @@ void rtl8723be_suspend(struct ieee80211_hw *hw)
void rtl8723be_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
- bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config = 0x%08X, write_into_reg =%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
index b7449a9b57e4..64c7551af6b7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -59,6 +59,4 @@ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
void rtl8723be_suspend(struct ieee80211_hw *hw);
void rtl8723be_resume(struct ieee80211_hw *hw);
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
- bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index b4577ebc4bb0..ff12bf41644b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
rtl8723be_bt_reg_init(hw);
- rtlpci->msi_support = true;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -253,6 +253,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .msi_support = false,
.debug = DBG_EMERG,
};
@@ -365,9 +366,11 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index e0a0d8c8fed5..969eaea5eddd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -33,7 +33,6 @@
#include "trx.h"
#include "led.h"
#include "dm.h"
-#include "phy.h"
static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
{
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6965afdf572a..407a7936d364 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1960,8 +1960,6 @@ struct rtl_hal_ops {
u32 regaddr, u32 bitmask);
void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
u32 regaddr, u32 bitmask, u32 data);
- void (*allow_all_destaddr)(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
void (*linked_set_reg) (struct ieee80211_hw *hw);
void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
@@ -2030,6 +2028,10 @@ struct rtl_mod_params {
/* default: 1 = using linked fw power save */
bool fwctrl_lps;
+
+ /* default: 0 = not using MSI interrupts mode */
+ /* submodules should set their own default value */
+ bool msi_support;
};
struct rtl_hal_usbint_cfg {
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index 5a4ec56c83d0..5695628757ee 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -2,7 +2,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/crc7.h>
#include "wl1251.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index bf1fa18b9786..ede31f048ef9 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -2,7 +2,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/crc7.h>
#include <linux/etherdevice.h>
#include "wl1251.h"
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index db0105313745..c98630394a1a 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -124,11 +124,12 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
return ret;
}
- if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+ if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack, that beacons have been lost */
- ieee80211_beacon_loss(wl->vif);
+ if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_beacon_loss(wl->vif);
}
if (vector & REGAINED_BSS_EVENT_ID) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 757e25784a8a..4e782f18ae34 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -550,6 +550,34 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
+static int wl1251_build_null_data(struct wl1251 *wl)
+{
+ struct sk_buff *skb = NULL;
+ int size;
+ void *ptr;
+ int ret = -ENOMEM;
+
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ size = sizeof(struct wl12xx_null_data_template);
+ ptr = NULL;
+ } else {
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+ size = skb->len;
+ ptr = skb->data;
+ }
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
+
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1251_warning("cmd buld null data failed: %d", ret);
+
+ return ret;
+}
+
static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
struct ieee80211_qos_hdr template;
@@ -687,16 +715,6 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->power_level = conf->power_level;
}
- /*
- * Tell stack that connection is lost because hw encryption isn't
- * supported in monitor mode.
- * This requires temporary enabling of the hw connection monitor flag
- */
- if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
- wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
- ieee80211_connection_loss(wl->vif);
- }
-
out_sleep:
wl1251_ps_elp_sleep(wl);
@@ -1103,24 +1121,19 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
wl->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if (changed & BSS_CHANGED_BSSID) {
+ if ((changed & BSS_CHANGED_BSSID) &&
+ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
- if (!skb)
- goto out_sleep;
-
- ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
- skb->data, skb->len);
- dev_kfree_skb(skb);
- if (ret < 0)
- goto out_sleep;
+ if (!is_zero_ether_addr(wl->bssid)) {
+ ret = wl1251_build_null_data(wl);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_build_qos_null_data(wl);
- if (ret < 0)
- goto out;
+ ret = wl1251_build_qos_null_data(wl);
+ if (ret < 0)
+ goto out_sleep;
- if (wl->bss_type != BSS_TYPE_IBSS) {
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0)
@@ -1129,9 +1142,6 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- /* Disable temporary enabled hw connection monitor flag */
- wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
-
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
@@ -1216,8 +1226,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
- wl->channel, wl->dtim_period);
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
if (ret < 0)
goto out_sleep;
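
Two fixes converge in this file: NULL-data template programming is factored into wl1251_build_null_data(), which needs a real frame from mac80211 only outside IBSS, and a long-standing wl1251_join() call had its channel and beacon-interval arguments swapped. The corrected call order, as used throughout the file now:

	/* Channel before beacon interval, matching wl1251_join()'s
	 * signature.
	 */
	ret = wl1251_join(wl, wl->bss_type, wl->channel,
			  wl->beacon_int, wl->dtim_period);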
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index b06d36d99362..a0aa8fa72392 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -23,6 +23,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
@@ -83,47 +84,44 @@ static void wl1251_spi_reset(struct wl1251 *wl)
static void wl1251_spi_wake(struct wl1251 *wl)
{
- u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
+ u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
- cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
wl1251_error("could not allocate cmd for spi init");
return;
}
- memset(crc, 0, sizeof(crc));
memset(&t, 0, sizeof(t));
spi_message_init(&m);
/* Set WSPI_INIT_COMMAND
* the data is being send from the MSB to LSB
*/
- cmd[2] = 0xff;
- cmd[3] = 0xff;
- cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
- cmd[0] = 0;
- cmd[7] = 0;
- cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
- cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+ cmd[0] = 0xff;
+ cmd[1] = 0xff;
+ cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+ cmd[3] = 0;
+ cmd[4] = 0;
+ cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+ cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+ cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
- cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+ cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
else
- cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
- cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
- | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
- crc[0] = cmd[1];
- crc[1] = cmd[0];
- crc[2] = cmd[7];
- crc[3] = cmd[6];
- crc[4] = cmd[5];
+ cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
- cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
- cmd[4] |= WSPI_INIT_CMD_END;
+ cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+ /*
+ * The above is the logical order; it must actually be stored
+ * in the buffer byte-swapped.
+ */
+ __swab32s((u32 *)cmd);
+ __swab32s((u32 *)cmd+1);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
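
The rework builds the WSPI init command in its logical (MSB-first) byte order, computes the CRC with crc7_be() directly over bytes 2..6, and byte-swaps each 32-bit word once for the bus, replacing the old shuffled crc[] scratch buffer. The essential steps, condensed:

	/* Condensed sketch of the new construction. */
	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);

	/* bytes 0..6 hold the command fields in logical order, then: */
	cmd[7] = crc7_be(0, cmd + 2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;

	/* The bus expects each 32-bit word byte-swapped. */
	__swab32s((u32 *)cmd);
	__swab32s((u32 *)cmd + 1);

The identical change lands in wlcore/spi.c below.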
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index f7381dd69009..0f2cfb0d2a9e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -57,7 +57,7 @@ static const struct file_operations name## _ops = { \
wl, &name## _ops); \
if (!entry || IS_ERR(entry)) \
goto err; \
- } while (0);
+ } while (0)
#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
@@ -66,7 +66,7 @@ static const struct file_operations name## _ops = { \
wl, &prefix## _## name## _ops); \
if (!entry || IS_ERR(entry)) \
goto err; \
- } while (0);
+ } while (0)
#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
static ssize_t sub## _ ##name## _read(struct file *file, \
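
Dropping the trailing semicolon is the classic do { } while (0) macro fix: with the semicolon baked into the macro, every use site expands to two statements, which breaks if/else chaining. A minimal illustration:

	#define BAD()  do { } while (0);	/* ';' inside the macro */
	#define GOOD() do { } while (0)

	if (cond)
		BAD();		/* expands to '... while (0); ;' -- the  */
	else			/* first ';' ends the if, so this 'else' */
		BAD();		/* no longer parses                      */

	if (cond)
		GOOD();		/* exactly one statement: chains cleanly */
	else
		GOOD();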
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e71eae353368..3d6028e62750 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1416,7 +1416,7 @@ void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
u16 offset, u8 flags,
- u8 *pattern, u8 len)
+ const u8 *pattern, u8 len)
{
struct wl12xx_rx_filter_field *field;
@@ -5184,7 +5184,8 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 29ef2492951f..d3dd7bfdf3f1 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wlcore_platdev_data *pdev_data;
+ struct wlcore_platdev_data pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
- pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
- if (!pdev_data)
- goto out;
-
- pdev_data->if_ops = &sdio_ops;
+ memset(&pdev_data, 0x00, sizeof(pdev_data));
+ pdev_data.if_ops = &sdio_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&func->dev, "can't allocate glue\n");
- goto out_free_pdev_data;
+ goto out;
}
glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- pdev_data->pdata = wl12xx_get_platform_data();
- if (IS_ERR(pdev_data->pdata)) {
- ret = PTR_ERR(pdev_data->pdata);
+ pdev_data.pdata = wl12xx_get_platform_data();
+ if (IS_ERR(pdev_data.pdata)) {
+ ret = PTR_ERR(pdev_data.pdata);
dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
goto out_free_glue;
}
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data->pdata->pwr_in_suspend = true;
+ pdev_data.pdata->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
memset(res, 0x00, sizeof(res));
- res[0].start = pdev_data->pdata->irq;
+ res[0].start = pdev_data.pdata->irq;
res[0].flags = IORESOURCE_IRQ;
res[0].name = "irq";
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdev_data,
- sizeof(*pdev_data));
+ ret = platform_device_add_data(glue->core, &pdev_data,
+ sizeof(pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
out_free_glue:
kfree(glue);
-out_free_pdev_data:
- kfree(pdev_data);
-
out:
return ret;
}
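
pdev_data can live on the stack because platform_device_add_data() copies the buffer into the platform device rather than keeping the caller's pointer, so the kzalloc/kfree pair and the out_free_pdev_data error path were pure overhead. The shape of the change:

	/* platform_device_add_data() duplicates the buffer, so a
	 * stack-local pdev_data is sufficient for the probe path.
	 */
	struct wlcore_platdev_data pdev_data;

	memset(&pdev_data, 0x00, sizeof(pdev_data));
	pdev_data.if_ops = &sdio_ops;

	/* later in probe: the data is copied, not referenced */
	ret = platform_device_add_data(glue->core, &pdev_data,
				       sizeof(pdev_data));

The SPI glue below receives the same conversion.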
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index dbe826dd7c23..392c882b28f0 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -24,11 +24,12 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
@@ -110,18 +111,16 @@ static void wl12xx_spi_reset(struct device *child)
static void wl12xx_spi_init(struct device *child)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
+ u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
- cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
dev_err(child->parent,
"could not allocate cmd for spi init\n");
return;
}
- memset(crc, 0, sizeof(crc));
memset(&t, 0, sizeof(t));
spi_message_init(&m);
@@ -129,30 +128,29 @@ static void wl12xx_spi_init(struct device *child)
* Set WSPI_INIT_COMMAND
* the data is being send from the MSB to LSB
*/
- cmd[2] = 0xff;
- cmd[3] = 0xff;
- cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
- cmd[0] = 0;
- cmd[7] = 0;
- cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
- cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+ cmd[0] = 0xff;
+ cmd[1] = 0xff;
+ cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+ cmd[3] = 0;
+ cmd[4] = 0;
+ cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+ cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+ cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
- cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+ cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
else
- cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
- cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
- | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
- crc[0] = cmd[1];
- crc[1] = cmd[0];
- crc[2] = cmd[7];
- crc[3] = cmd[6];
- crc[4] = cmd[5];
+ cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
- cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
- cmd[4] |= WSPI_INIT_CMD_END;
+ cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+ /*
+ * The above is the logical order; it must actually be stored
+ * in the buffer byte-swapped.
+ */
+ __swab32s((u32 *)cmd);
+ __swab32s((u32 *)cmd+1);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
@@ -327,27 +325,25 @@ static struct wl1271_if_operations spi_ops = {
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wlcore_platdev_data *pdev_data;
+ struct wlcore_platdev_data pdev_data;
struct resource res[1];
int ret = -ENOMEM;
- pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
- if (!pdev_data)
- goto out;
+ memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data->pdata = dev_get_platdata(&spi->dev);
- if (!pdev_data->pdata) {
+ pdev_data.pdata = dev_get_platdata(&spi->dev);
+ if (!pdev_data.pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
- goto out_free_pdev_data;
+ goto out;
}
- pdev_data->if_ops = &spi_ops;
+ pdev_data.if_ops = &spi_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&spi->dev, "can't allocate glue\n");
- goto out_free_pdev_data;
+ goto out;
}
glue->dev = &spi->dev;
@@ -385,8 +381,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdev_data,
- sizeof(*pdev_data));
+ ret = platform_device_add_data(glue->core, &pdev_data,
+ sizeof(pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -406,9 +402,6 @@ out_dev_put:
out_free_glue:
kfree(glue);
-out_free_pdev_data:
- kfree(pdev_data);
-
out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 756e890bc5ee..c2c34a84ff3d 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -512,8 +512,8 @@ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
- u16 offset, u8 flags,
- u8 *pattern, u8 len);
+ u16 offset, u8 flags,
+ const u8 *pattern, u8 len);
void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0d4a285cbd7e..2532ce85d718 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
*/
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-struct xenvif {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
- /* Is this interface disabled? True when backend discovers
- * frontend is rogue.
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
+struct xenvif;
+
+struct xenvif_stats {
+ /* Stats fields to be updated per-queue.
+ * A subset of struct net_device_stats that contains only the
+ * fields that are updated in netback.c for each queue.
*/
- bool disabled;
+ unsigned int rx_bytes;
+ unsigned int rx_packets;
+ unsigned int tx_bytes;
+ unsigned int tx_packets;
+
+ /* Additional stats used by xenvif */
+ unsigned long rx_gso_checksum_fixup;
+ unsigned long tx_zerocopy_sent;
+ unsigned long tx_zerocopy_success;
+ unsigned long tx_zerocopy_fail;
+ unsigned long tx_frag_overflow;
+};
+
+struct xenvif_queue { /* Per-queue data for xenvif */
+ unsigned int id; /* Queue ID, 0-based */
+ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+ struct xenvif *vif; /* Parent VIF */
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int tx_irq;
/* Only used when feature-split-event-channels = 1 */
- char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
struct xen_netif_tx_back_ring tx;
struct sk_buff_head tx_queue;
struct page *mmap_pages[MAX_PENDING_REQS];
@@ -150,7 +171,7 @@ struct xenvif {
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int rx_irq;
/* Only used when feature-split-event-channels = 1 */
- char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
RING_IDX rx_last_skb_slots;
@@ -158,14 +179,29 @@ struct xenvif {
struct timer_list wake_queue;
- /* This array is allocated seperately as it is large */
- struct gnttab_copy *grant_copy_op;
+ struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
/* We create one meta structure per ring request we consume, so
* the maximum number is the same as the ring size.
*/
struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+ u64 credit_window_start;
+
+ /* Statistics */
+ struct xenvif_stats stats;
+};
+
+struct xenvif {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+
u8 fe_dev_addr[6];
/* Frontend feature information. */
@@ -179,19 +215,14 @@ struct xenvif {
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
- /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
- unsigned long credit_bytes;
- unsigned long credit_usec;
- unsigned long remaining_credit;
- struct timer_list credit_timeout;
- u64 credit_window_start;
+ /* Is this interface disabled? True when backend discovers
+ * frontend is rogue.
+ */
+ bool disabled;
- /* Statistics */
- unsigned long rx_gso_checksum_fixup;
- unsigned long tx_zerocopy_sent;
- unsigned long tx_zerocopy_success;
- unsigned long tx_zerocopy_fail;
- unsigned long tx_frag_overflow;
+ /* Queues */
+ struct xenvif_queue *queues;
+ unsigned int num_queues; /* active queues, resource allocated */
/* Miscellaneous private stuff. */
struct net_device *dev;
@@ -206,7 +237,10 @@ struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
unsigned int handle);
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue);
+void xenvif_deinit_queue(struct xenvif_queue *queue);
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
@@ -217,44 +251,47 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xenvif_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif_queue *queue);
+
+int xenvif_queue_stopped(struct xenvif_queue *queue);
+void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xenvif_map_frontend_rings(struct xenvif *vif,
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
-int xenvif_tx_action(struct xenvif *vif, int budget);
+int xenvif_tx_action(struct xenvif_queue *queue, int budget);
int xenvif_kthread_guest_rx(void *data);
-void xenvif_kick_thread(struct xenvif *vif);
+void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
/* Determine whether the needed number of slots (req) are available,
* and set req_event if not.
*/
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-void xenvif_stop_queue(struct xenvif *vif);
+void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -
- vif->pending_prod + vif->pending_cons;
+ queue->pending_prod + queue->pending_cons;
}
/* Callback from stack when TX packet can be released */
@@ -264,5 +301,6 @@ extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int xenvif_max_queues;
#endif /* __XEN_NETBACK__COMMON_H__ */
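
This header change is the heart of the multi-queue refactor: everything that exists once per ring pair (NAPI context, IRQs, pending-request state, credit shaping, statistics) moves from struct xenvif into struct xenvif_queue, while the vif keeps only identity, feature state, and an array of queues. Queue and IRQ names follow the scheme in the comments above; illustrative only, not verbatim driver code:

	/* "vifD.H-qN" plus "-tx"/"-rx" for split event channels. */
	snprintf(queue->name, QUEUE_NAME_SIZE, "%s-q%u",
		 queue->vif->dev->name, queue->id);
	snprintf(queue->tx_irq_name, IRQ_NAME_SIZE, "%s-tx", queue->name);
	snprintf(queue->rx_irq_name, IRQ_NAME_SIZE, "%s-rx", queue->name);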
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 20e9defa1060..9e97c7ca0ddd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+ struct net_device *dev = queue->vif->dev;
+
+ if (!queue->vif->can_queue)
+ return;
+
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+}
+
int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,33 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
- struct xenvif *vif = dev_id;
+ struct xenvif_queue *queue = dev_id;
- if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
- napi_schedule(&vif->napi);
+ if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+ napi_schedule(&queue->napi);
return IRQ_HANDLED;
}
-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
{
- struct xenvif *vif = container_of(napi, struct xenvif, napi);
+ struct xenvif_queue *queue =
+ container_of(napi, struct xenvif_queue, napi);
int work_done;
/* This vif is rogue, we pretend there is nothing to do
* for this vif to deschedule it from NAPI. But this interface
* will be turned off in thread context later.
*/
- if (unlikely(vif->disabled)) {
+ if (unlikely(queue->vif->disabled)) {
napi_complete(napi);
return 0;
}
- work_done = xenvif_tx_action(vif, budget);
+ work_done = xenvif_tx_action(queue, budget);
if (work_done < budget) {
napi_complete(napi);
- xenvif_napi_schedule_or_enable_events(vif);
+ xenvif_napi_schedule_or_enable_events(queue);
}
return work_done;
@@ -84,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
- struct xenvif *vif = dev_id;
+ struct xenvif_queue *queue = dev_id;
- xenvif_kick_thread(vif);
+ xenvif_kick_thread(queue);
return IRQ_HANDLED;
}
@@ -99,28 +110,59 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void xenvif_wake_queue(unsigned long data)
+int xenvif_queue_stopped(struct xenvif_queue *queue)
+{
+ struct net_device *dev = queue->vif->dev;
+ unsigned int id = queue->id;
+ return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+}
+
+void xenvif_wake_queue(struct xenvif_queue *queue)
+{
+ struct net_device *dev = queue->vif->dev;
+ unsigned int id = queue->id;
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+}
+
+/* Callback to wake the queue and drain it on timeout */
+static void xenvif_wake_queue_callback(unsigned long data)
{
- struct xenvif *vif = (struct xenvif *)data;
+ struct xenvif_queue *queue = (struct xenvif_queue *)data;
- if (netif_queue_stopped(vif->dev)) {
- netdev_err(vif->dev, "draining TX queue\n");
- vif->rx_queue_purge = true;
- xenvif_kick_thread(vif);
- netif_wake_queue(vif->dev);
+ if (xenvif_queue_stopped(queue)) {
+ netdev_err(queue->vif->dev, "draining TX queue\n");
+ queue->rx_queue_purge = true;
+ xenvif_kick_thread(queue);
+ xenvif_wake_queue(queue);
}
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->num_queues;
+ u16 index;
int min_slots_needed;
BUG_ON(skb->dev != dev);
- /* Drop the packet if vif is not ready */
- if (vif->task == NULL ||
- vif->dealloc_task == NULL ||
+ /* Drop the packet if queues are not set up */
+ if (num_queues < 1)
+ goto drop;
+
+ /* Obtain the queue to be used to transmit this packet */
+ index = skb_get_queue_mapping(skb);
+ if (index >= num_queues) {
+ pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+ index, vif->dev->name);
+ index %= num_queues;
+ }
+ queue = &vif->queues[index];
+
+ /* Drop the packet if queue is not ready */
+ if (queue->task == NULL ||
+ queue->dealloc_task == NULL ||
!xenvif_schedulable(vif))
goto drop;
@@ -139,16 +181,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
* then turn off the queue to give the ring a chance to
* drain.
*/
- if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
- vif->wake_queue.function = xenvif_wake_queue;
- vif->wake_queue.data = (unsigned long)vif;
- xenvif_stop_queue(vif);
- mod_timer(&vif->wake_queue,
+ if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
+ queue->wake_queue.function = xenvif_wake_queue_callback;
+ queue->wake_queue.data = (unsigned long)queue;
+ xenvif_stop_queue(queue);
+ mod_timer(&queue->wake_queue,
jiffies + rx_drain_timeout_jiffies);
}
- skb_queue_tail(&vif->rx_queue, skb);
- xenvif_kick_thread(vif);
+ skb_queue_tail(&queue->rx_queue, skb);
+ xenvif_kick_thread(queue);
return NETDEV_TX_OK;
@@ -161,25 +203,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->num_queues;
+ unsigned long rx_bytes = 0;
+ unsigned long rx_packets = 0;
+ unsigned long tx_bytes = 0;
+ unsigned long tx_packets = 0;
+ unsigned int index;
+
+ if (vif->queues == NULL)
+ goto out;
+
+ /* Aggregate tx and rx stats from each queue */
+ for (index = 0; index < num_queues; ++index) {
+ queue = &vif->queues[index];
+ rx_bytes += queue->stats.rx_bytes;
+ rx_packets += queue->stats.rx_packets;
+ tx_bytes += queue->stats.tx_bytes;
+ tx_packets += queue->stats.tx_packets;
+ }
+
+out:
+ vif->dev->stats.rx_bytes = rx_bytes;
+ vif->dev->stats.rx_packets = rx_packets;
+ vif->dev->stats.tx_bytes = tx_bytes;
+ vif->dev->stats.tx_packets = tx_packets;
+
return &vif->dev->stats;
}
static void xenvif_up(struct xenvif *vif)
{
- napi_enable(&vif->napi);
- enable_irq(vif->tx_irq);
- if (vif->tx_irq != vif->rx_irq)
- enable_irq(vif->rx_irq);
- xenvif_napi_schedule_or_enable_events(vif);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->num_queues;
+ unsigned int queue_index;
+
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
+ napi_enable(&queue->napi);
+ enable_irq(queue->tx_irq);
+ if (queue->tx_irq != queue->rx_irq)
+ enable_irq(queue->rx_irq);
+ xenvif_napi_schedule_or_enable_events(queue);
+ }
}
static void xenvif_down(struct xenvif *vif)
{
- napi_disable(&vif->napi);
- disable_irq(vif->tx_irq);
- if (vif->tx_irq != vif->rx_irq)
- disable_irq(vif->rx_irq);
- del_timer_sync(&vif->credit_timeout);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->num_queues;
+ unsigned int queue_index;
+
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
+ napi_disable(&queue->napi);
+ disable_irq(queue->tx_irq);
+ if (queue->tx_irq != queue->rx_irq)
+ disable_irq(queue->rx_irq);
+ del_timer_sync(&queue->credit_timeout);
+ }
}
static int xenvif_open(struct net_device *dev)
@@ -187,7 +269,7 @@ static int xenvif_open(struct net_device *dev)
struct xenvif *vif = netdev_priv(dev);
if (netif_carrier_ok(dev))
xenvif_up(vif);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -196,7 +278,7 @@ static int xenvif_close(struct net_device *dev)
struct xenvif *vif = netdev_priv(dev);
if (netif_carrier_ok(dev))
xenvif_down(vif);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
return 0;
}
@@ -236,29 +318,29 @@ static const struct xenvif_stat {
} xenvif_stats[] = {
{
"rx_gso_checksum_fixup",
- offsetof(struct xenvif, rx_gso_checksum_fixup)
+ offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
},
/* If (sent != success + fail), there are probably packets never
* freed up properly!
*/
{
"tx_zerocopy_sent",
- offsetof(struct xenvif, tx_zerocopy_sent),
+ offsetof(struct xenvif_stats, tx_zerocopy_sent),
},
{
"tx_zerocopy_success",
- offsetof(struct xenvif, tx_zerocopy_success),
+ offsetof(struct xenvif_stats, tx_zerocopy_success),
},
{
"tx_zerocopy_fail",
- offsetof(struct xenvif, tx_zerocopy_fail)
+ offsetof(struct xenvif_stats, tx_zerocopy_fail)
},
/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
* a guest with the same MAX_SKB_FRAG
*/
{
"tx_frag_overflow",
- offsetof(struct xenvif, tx_frag_overflow)
+ offsetof(struct xenvif_stats, tx_frag_overflow)
},
};
@@ -275,11 +357,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
- void *vif = netdev_priv(dev);
+ struct xenvif *vif = netdev_priv(dev);
+ unsigned int num_queues = vif->num_queues;
int i;
-
- for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
- data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+ unsigned int queue_index;
+ struct xenvif_stats *vif_stats;
+
+ for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
+ unsigned long accum = 0;
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ vif_stats = &vif->queues[queue_index].stats;
+ accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
+ }
+ data[i] = accum;
+ }
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -321,10 +412,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
- int i;
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
- dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+ /* Allocate a netdev with the max. supported number of queues.
+ * When the guest selects the desired number, it will be updated
+ * via netif_set_real_num_*_queues().
+ */
+ dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
+ xenvif_max_queues);
if (dev == NULL) {
pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
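alloc_netdev_mq() fixes the backing queue storage at allocation time, so netback sizes it for the worst case and narrows the advertised count once the frontend has negotiated. A sketch of the narrowing step named in the comment above, with an assumed helper name:

	/* Sketch (assumed helper, not shown in this hunk): advertise n real
	 * queues after negotiation, where n <= xenvif_max_queues. */
	static int xenvif_set_queue_count(struct net_device *dev, unsigned int n)
	{
		int err;

		err = netif_set_real_num_tx_queues(dev, n);
		if (err)
			return err;
		return netif_set_real_num_rx_queues(dev, n);
	}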
@@ -334,66 +429,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif = netdev_priv(dev);
- vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
- MAX_GRANT_COPY_OPS);
- if (vif->grant_copy_op == NULL) {
- pr_warn("Could not allocate grant copy space for %s\n", name);
- free_netdev(dev);
- return ERR_PTR(-ENOMEM);
- }
-
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
vif->ip_csum = 1;
vif->dev = dev;
-
vif->disabled = false;
- vif->credit_bytes = vif->remaining_credit = ~0UL;
- vif->credit_usec = 0UL;
- init_timer(&vif->credit_timeout);
- vif->credit_window_start = get_jiffies_64();
-
- init_timer(&vif->wake_queue);
+ /* Start out with no queues. */
+ vif->queues = NULL;
+ vif->num_queues = 0;
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
dev->features = dev->hw_features | NETIF_F_RXCSUM;
- SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+ dev->ethtool_ops = &xenvif_ethtool_ops;
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
- skb_queue_head_init(&vif->rx_queue);
- skb_queue_head_init(&vif->tx_queue);
-
- vif->pending_cons = 0;
- vif->pending_prod = MAX_PENDING_REQS;
- for (i = 0; i < MAX_PENDING_REQS; i++)
- vif->pending_ring[i] = i;
- spin_lock_init(&vif->callback_lock);
- spin_lock_init(&vif->response_lock);
- /* If ballooning is disabled, this will consume real memory, so you'd
- * better enable it. The long-term solution would be to use just a
- * bunch of valid page descriptors, without depending on ballooning.
- */
- err = alloc_xenballooned_pages(MAX_PENDING_REQS,
- vif->mmap_pages,
- false);
- if (err) {
- netdev_err(dev, "Could not reserve mmap_pages\n");
- return ERR_PTR(-ENOMEM);
- }
- for (i = 0; i < MAX_PENDING_REQS; i++) {
- vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
- { .callback = xenvif_zerocopy_callback,
- .ctx = NULL,
- .desc = i };
- vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
- }
-
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
@@ -403,8 +458,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
- netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -421,98 +474,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
return vif;
}
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue)
+{
+ int err, i;
+
+ queue->credit_bytes = queue->remaining_credit = ~0UL;
+ queue->credit_usec = 0UL;
+ init_timer(&queue->credit_timeout);
+ queue->credit_window_start = get_jiffies_64();
+
+ skb_queue_head_init(&queue->rx_queue);
+ skb_queue_head_init(&queue->tx_queue);
+
+ queue->pending_cons = 0;
+ queue->pending_prod = MAX_PENDING_REQS;
+ for (i = 0; i < MAX_PENDING_REQS; ++i)
+ queue->pending_ring[i] = i;
+
+ spin_lock_init(&queue->callback_lock);
+ spin_lock_init(&queue->response_lock);
+
+ /* If ballooning is disabled, this will consume real memory, so you'd
+ * better enable it. The long-term solution would be to use just a
+ * bunch of valid page descriptors, without depending on ballooning.
+ */
+ err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+ queue->mmap_pages,
+ false);
+ if (err) {
+ netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+ queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
+ { .callback = xenvif_zerocopy_callback,
+ .ctx = NULL,
+ .desc = i };
+ queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+ }
+
+ init_timer(&queue->wake_queue);
+
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+ XENVIF_NAPI_WEIGHT);
+
+ return 0;
+}
+
+void xenvif_carrier_on(struct xenvif *vif)
+{
+ rtnl_lock();
+ if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+ dev_set_mtu(vif->dev, ETH_DATA_LEN);
+ netdev_update_features(vif->dev);
+ netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
+ rtnl_unlock();
+}
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
struct task_struct *task;
int err = -ENOMEM;
- BUG_ON(vif->tx_irq);
- BUG_ON(vif->task);
- BUG_ON(vif->dealloc_task);
+ BUG_ON(queue->tx_irq);
+ BUG_ON(queue->task);
+ BUG_ON(queue->dealloc_task);
- err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
- init_waitqueue_head(&vif->wq);
- init_waitqueue_head(&vif->dealloc_wq);
+ init_waitqueue_head(&queue->wq);
+ init_waitqueue_head(&queue->dealloc_wq);
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
- vif->domid, tx_evtchn, xenvif_interrupt, 0,
- vif->dev->name, vif);
+ queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+ queue->name, queue);
if (err < 0)
goto err_unmap;
- vif->tx_irq = vif->rx_irq = err;
- disable_irq(vif->tx_irq);
+ queue->tx_irq = queue->rx_irq = err;
+ disable_irq(queue->tx_irq);
} else {
/* feature-split-event-channels == 1 */
- snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
- "%s-tx", vif->dev->name);
+ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+ "%s-tx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler(
- vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
- vif->tx_irq_name, vif);
+ queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+ queue->tx_irq_name, queue);
if (err < 0)
goto err_unmap;
- vif->tx_irq = err;
- disable_irq(vif->tx_irq);
+ queue->tx_irq = err;
+ disable_irq(queue->tx_irq);
- snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
- "%s-rx", vif->dev->name);
+ snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+ "%s-rx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler(
- vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
- vif->rx_irq_name, vif);
+ queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+ queue->rx_irq_name, queue);
if (err < 0)
goto err_tx_unbind;
- vif->rx_irq = err;
- disable_irq(vif->rx_irq);
+ queue->rx_irq = err;
+ disable_irq(queue->rx_irq);
}
task = kthread_create(xenvif_kthread_guest_rx,
- (void *)vif, "%s-guest-rx", vif->dev->name);
+ (void *)queue, "%s-guest-rx", queue->name);
if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
err = PTR_ERR(task);
goto err_rx_unbind;
}
-
- vif->task = task;
+ queue->task = task;
task = kthread_create(xenvif_dealloc_kthread,
- (void *)vif, "%s-dealloc", vif->dev->name);
+ (void *)queue, "%s-dealloc", queue->name);
if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
err = PTR_ERR(task);
goto err_rx_unbind;
}
+ queue->dealloc_task = task;
- vif->dealloc_task = task;
-
- rtnl_lock();
- if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
- dev_set_mtu(vif->dev, ETH_DATA_LEN);
- netdev_update_features(vif->dev);
- netif_carrier_on(vif->dev);
- if (netif_running(vif->dev))
- xenvif_up(vif);
- rtnl_unlock();
-
- wake_up_process(vif->task);
- wake_up_process(vif->dealloc_task);
+ wake_up_process(queue->task);
+ wake_up_process(queue->dealloc_task);
return 0;
err_rx_unbind:
- unbind_from_irqhandler(vif->rx_irq, vif);
- vif->rx_irq = 0;
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ queue->rx_irq = 0;
err_tx_unbind:
- unbind_from_irqhandler(vif->tx_irq, vif);
- vif->tx_irq = 0;
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ queue->tx_irq = 0;
err_unmap:
- xenvif_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(queue);
err:
module_put(THIS_MODULE);
return err;
@@ -529,38 +631,77 @@ void xenvif_carrier_off(struct xenvif *vif)
rtnl_unlock();
}
+static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
+ unsigned int worst_case_skb_lifetime)
+{
+ int i, unmap_timeout = 0;
+
+ for (i = 0; i < MAX_PENDING_REQS; ++i) {
+ if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+ unmap_timeout++;
+ schedule_timeout(msecs_to_jiffies(1000));
+ if (unmap_timeout > worst_case_skb_lifetime &&
+ net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Page still granted! Index: %x\n",
+ i);
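+ /* If there are still unmapped pages, reset the loop to
+ * start checking again. We shouldn't exit here until the
+ * dealloc thread and NAPI instance release all the pages.
+ * If a kernel bug causes the skbs to stall somewhere, the
+ * interface cannot be brought down properly.
+ */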
+ i = -1;
+ }
+ }
+}
+
void xenvif_disconnect(struct xenvif *vif)
{
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->num_queues;
+ unsigned int queue_index;
+
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
- if (vif->task) {
- del_timer_sync(&vif->wake_queue);
- kthread_stop(vif->task);
- vif->task = NULL;
- }
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
- if (vif->dealloc_task) {
- kthread_stop(vif->dealloc_task);
- vif->dealloc_task = NULL;
- }
+ if (queue->task) {
+ del_timer_sync(&queue->wake_queue);
+ kthread_stop(queue->task);
+ queue->task = NULL;
+ }
- if (vif->tx_irq) {
- if (vif->tx_irq == vif->rx_irq)
- unbind_from_irqhandler(vif->tx_irq, vif);
- else {
- unbind_from_irqhandler(vif->tx_irq, vif);
- unbind_from_irqhandler(vif->rx_irq, vif);
+ if (queue->dealloc_task) {
+ kthread_stop(queue->dealloc_task);
+ queue->dealloc_task = NULL;
}
- vif->tx_irq = 0;
+
+ if (queue->tx_irq) {
+ if (queue->tx_irq == queue->rx_irq)
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ else {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ }
+ queue->tx_irq = 0;
+ }
+
+ xenvif_unmap_frontend_rings(queue);
}
+}
- xenvif_unmap_frontend_rings(vif);
+/* Reverse the relevant parts of xenvif_init_queue().
+ * Used for queue teardown from xenvif_free(), and on the
+ * error handling paths in xenbus.c:connect().
+ */
+void xenvif_deinit_queue(struct xenvif_queue *queue)
+{
+ free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+ netif_napi_del(&queue->napi);
}
void xenvif_free(struct xenvif *vif)
{
- int i, unmap_timeout = 0;
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->num_queues;
+ unsigned int queue_index;
/* Here we want to avoid timeout messages if an skb can be legitimately
* stuck somewhere else. Realistically this could be another vif's
* internal or QDisc queue. That other vif also has this
@@ -575,33 +716,18 @@ void xenvif_free(struct xenvif *vif)
unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
- for (i = 0; i < MAX_PENDING_REQS; ++i) {
- if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
- unmap_timeout++;
- schedule_timeout(msecs_to_jiffies(1000));
- if (unmap_timeout > worst_case_skb_lifetime &&
- net_ratelimit())
- netdev_err(vif->dev,
- "Page still granted! Index: %x\n",
- i);
- /* If there are still unmapped pages, reset the loop to
- * start checking again. We shouldn't exit here until
- * dealloc thread and NAPI instance release all the
- * pages. If a kernel bug causes the skbs to stall
- * somewhere, the interface cannot be brought down
- * properly.
- */
- i = -1;
- }
- }
-
- free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+ unregister_netdev(vif->dev);
- netif_napi_del(&vif->napi);
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
+ xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
+ xenvif_deinit_queue(queue);
+ }
- unregister_netdev(vif->dev);
+ vfree(vif->queues);
+ vif->queues = NULL;
+ vif->num_queues = 0;
- vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7367208ee8cd..1844a47636b6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -62,6 +62,11 @@ unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;
+unsigned int xenvif_max_queues;
+module_param_named(max_queues, xenvif_max_queues, uint, 0644);
+MODULE_PARM_DESC(max_queues,
+ "Maximum number of queues per virtual interface");
+
/*
* This is the maximum slots a skb can have. If a guest sends a skb
* which exceeds this limit it is considered malicious.
@@ -70,33 +75,33 @@ unsigned int rx_drain_timeout_jiffies;
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st);
-static inline int tx_work_todo(struct xenvif *vif);
-static inline int rx_work_todo(struct xenvif *vif);
+static inline int tx_work_todo(struct xenvif_queue *queue);
+static inline int rx_work_todo(struct xenvif_queue *queue);
-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags);
-static inline unsigned long idx_to_pfn(struct xenvif *vif,
+static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
u16 idx)
{
- return page_to_pfn(vif->mmap_pages[idx]);
+ return page_to_pfn(queue->mmap_pages[idx]);
}
-static inline unsigned long idx_to_kaddr(struct xenvif *vif,
+static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
u16 idx)
{
- return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
@@ -104,13 +109,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
/* Find the containing queue's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
+static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
container_of(ubuf, struct pending_tx_info, callback_struct);
return container_of(temp - pending_idx,
- struct xenvif,
+ struct xenvif_queue,
pending_tx_info[0]);
}
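The pointer arithmetic above is worth unpacking: container_of() maps the embedded ubuf_info back to its pending_tx_info element, and stepping back pending_idx elements lands on element 0 of the array, whose enclosing object is the queue. Spelled out:

	/* Recovery path, step by step (same logic as the function above):
	 *  1. ubuf == &queue->pending_tx_info[pending_idx].callback_struct
	 *  2. temp = container_of(ubuf, struct pending_tx_info,
	 *                         callback_struct)
	 *       == &queue->pending_tx_info[pending_idx]
	 *  3. temp - pending_idx == &queue->pending_tx_info[0]
	 *  4. container_of(..., struct xenvif_queue, pending_tx_info[0])
	 *       == queue
	 * This only works because each callback_struct's .desc was seeded
	 * with its own slot index in xenvif_init_queue().
	 */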
@@ -136,24 +141,24 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
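The mask works because MAX_PENDING_REQS is a power of two, so i & (SIZE - 1) equals i % SIZE while letting the producer/consumer cursors run free and wrap naturally. A runnable illustration:

	/* Standalone illustration: with SIZE a power of two, masking matches
	 * modulo even across unsigned wrap-around of free-running cursors. */
	#include <assert.h>

	#define SIZE 256u	/* stand-in for MAX_PENDING_REQS */

	int main(void)
	{
		unsigned int i = 0xfffffff0u;	/* about to wrap */
		unsigned int n;

		for (n = 0; n < 64; n++, i++)
			assert((i & (SIZE - 1)) == (i % SIZE));
		return 0;
	}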
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
RING_IDX prod, cons;
do {
- prod = vif->rx.sring->req_prod;
- cons = vif->rx.req_cons;
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
if (prod - cons >= needed)
return true;
- vif->rx.sring->req_event = prod + 1;
+ queue->rx.sring->req_event = prod + 1;
/* Make sure event is visible before we check prod
* again.
*/
mb();
- } while (vif->rx.sring->req_prod != prod);
+ } while (queue->rx.sring->req_prod != prod);
return false;
}
@@ -163,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
* adding 'size' bytes to a buffer which currently contains 'offset'
* bytes.
*/
-static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+static bool start_new_rx_buffer(int offset, unsigned long size, int head,
+ bool full_coalesce)
{
/* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET)
@@ -175,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer.
+ * and (iv) there is no need to fully utilize the buffers
*
* Where:
* - (i) stops us splitting a frag into two copies
@@ -185,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* by (ii) but is explicitly checked because
* netfront relies on the first buffer being
* non-empty and can crash otherwise.
+ * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
+ * slots.
*
* This means we will effectively linearise small
* frags but do not needlessly split large buffers
@@ -192,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* own buffers as before.
*/
BUG_ON(size > MAX_BUFFER_OFFSET);
- if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
+ !full_coalesce)
return true;
return false;
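A worked example of the new (iv) condition, assuming MAX_BUFFER_OFFSET is PAGE_SIZE (4096 bytes), as in this driver:

	/* offset = 3000, size = 2000, !head:
	 *   without full_coalesce: 3000 + 2000 > 4096, so start a fresh
	 *     buffer and keep the frag in a single copy operation;
	 *   with full_coalesce: stay in this buffer and split the frag
	 *     (1096 bytes here, 904 in the next), so the skb still fits
	 *     in the MAX_SKB_FRAGS slots promised to the frontend.
	 */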
@@ -207,13 +217,13 @@ struct netrx_pending_operations {
grant_ref_t copy_gref;
};
-static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -227,15 +237,22 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
return meta;
}
+struct xenvif_rx_cb {
+ int meta_slots_used;
+ bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
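XENVIF_RX_CB() reuses skb->cb, the 48-byte scratch area a layer may use while it owns the skb. A compile-time guard like the following, added here purely for illustration and typically placed in an init function, documents the size assumption:

	/* Illustrative guard, not part of the patch: */
	BUILD_BUG_ON(sizeof(struct xenvif_rx_cb) > FIELD_SIZEOF(struct sk_buff, cb));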
+
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
-static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
unsigned long offset, int *head,
- struct xenvif *foreign_vif,
+ struct xenvif_queue *foreign_queue,
grant_ref_t foreign_gref)
{
struct gnttab_copy *copy_gop;
@@ -261,14 +278,17 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
if (bytes > size)
bytes = size;
- if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+ if (start_new_rx_buffer(npo->copy_off,
+ bytes,
+ *head,
+ XENVIF_RX_CB(skb)->full_coalesce)) {
/*
* Netfront requires there to be some data in the head
* buffer.
*/
BUG_ON(*head);
- meta = get_next_rx_buffer(vif, npo);
+ meta = get_next_rx_buffer(queue, npo);
}
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -278,8 +298,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;
- if (foreign_vif) {
- copy_gop->source.domid = foreign_vif->domid;
+ if (foreign_queue) {
+ copy_gop->source.domid = foreign_queue->vif->domid;
copy_gop->source.u.ref = foreign_gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
@@ -289,7 +309,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
copy_gop->source.offset = offset;
- copy_gop->dest.domid = vif->domid;
+ copy_gop->dest.domid = queue->vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
@@ -314,8 +334,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
- if (*head && ((1 << gso_type) & vif->gso_mask))
- vif->rx.req_cons++;
+ if (*head && ((1 << gso_type) & queue->vif->gso_mask))
+ queue->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
@@ -337,13 +357,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
const int i,
const struct ubuf_info *ubuf)
{
- struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+ struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
do {
u16 pending_idx = ubuf->desc;
if (skb_shinfo(skb)->frags[i].page.p ==
- foreign_vif->mmap_pages[pending_idx])
+ foreign_queue->mmap_pages[pending_idx])
break;
ubuf = (struct ubuf_info *) ubuf->ctx;
} while (ubuf);
@@ -364,7 +384,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
* frontend-side LRO).
*/
static int xenvif_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
+ struct netrx_pending_operations *npo,
+ struct xenvif_queue *queue)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -390,7 +411,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
@@ -398,7 +419,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
meta->id = req->id;
}
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
if ((1 << gso_type) & vif->gso_mask) {
@@ -422,7 +443,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
- xenvif_gop_frag_copy(vif, skb, npo,
+ xenvif_gop_frag_copy(queue, skb, npo,
virt_to_page(data), len, offset, &head,
NULL,
0);
@@ -433,7 +454,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* This variable also signals whether foreign_gref has a real
* value or not.
*/
- struct xenvif *foreign_vif = NULL;
+ struct xenvif_queue *foreign_queue = NULL;
grant_ref_t foreign_gref;
if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
@@ -458,8 +479,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if (likely(ubuf)) {
u16 pending_idx = ubuf->desc;
- foreign_vif = ubuf_to_vif(ubuf);
- foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+ foreign_queue = ubuf_to_queue(ubuf);
+ foreign_gref =
+ foreign_queue->pending_tx_info[pending_idx].req.gref;
/* Just a safety measure. If this was the last
* element on the list, the for loop will
* iterate again if a local page were added to
@@ -477,13 +499,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
*/
ubuf = head_ubuf;
}
- xenvif_gop_frag_copy(vif, skb, npo,
+ xenvif_gop_frag_copy(queue, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head,
- foreign_vif,
- foreign_vif ? foreign_gref : UINT_MAX);
+ foreign_queue,
+ foreign_queue ? foreign_gref : UINT_MAX);
}
return npo->meta_prod - old_meta_prod;
@@ -515,7 +537,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
return status;
}
-static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
struct xenvif_rx_meta *meta,
int nr_meta_slots)
{
@@ -536,23 +558,17 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
flags = XEN_NETRXF_more_data;
offset = 0;
- make_rx_response(vif, meta[i].id, status, offset,
+ make_rx_response(queue, meta[i].id, status, offset,
meta[i].size, flags);
}
}
-struct xenvif_rx_cb {
- int meta_slots_used;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
-void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif_queue *queue)
{
- wake_up(&vif->wq);
+ wake_up(&queue->wq);
}
-static void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif_queue *queue)
{
s8 status;
u16 flags;
@@ -565,13 +581,13 @@ static void xenvif_rx_action(struct xenvif *vif)
bool need_to_notify = false;
struct netrx_pending_operations npo = {
- .copy = vif->grant_copy_op,
- .meta = vif->meta,
+ .copy = queue->grant_copy_op,
+ .meta = queue->meta,
};
skb_queue_head_init(&rxq);
- while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
+ while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
RING_IDX old_req_cons;
RING_IDX ring_slots_used;
@@ -602,10 +618,15 @@ static void xenvif_rx_action(struct xenvif *vif)
/* To avoid the estimate becoming too pessimal for some
* frontends that limit posted rx requests, cap the estimate
- * at MAX_SKB_FRAGS.
+ * at MAX_SKB_FRAGS. In this case netback will fully coalesce
+ * the skb into the provided slots.
*/
- if (max_slots_needed > MAX_SKB_FRAGS)
+ if (max_slots_needed > MAX_SKB_FRAGS) {
max_slots_needed = MAX_SKB_FRAGS;
+ XENVIF_RX_CB(skb)->full_coalesce = true;
+ } else {
+ XENVIF_RX_CB(skb)->full_coalesce = false;
+ }
/* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) &&
@@ -614,42 +635,42 @@ static void xenvif_rx_action(struct xenvif *vif)
max_slots_needed++;
/* If the skb may not fit then bail out now */
- if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
- skb_queue_head(&vif->rx_queue, skb);
+ if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
+ skb_queue_head(&queue->rx_queue, skb);
need_to_notify = true;
- vif->rx_last_skb_slots = max_slots_needed;
+ queue->rx_last_skb_slots = max_slots_needed;
break;
} else
- vif->rx_last_skb_slots = 0;
+ queue->rx_last_skb_slots = 0;
- old_req_cons = vif->rx.req_cons;
- XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
- ring_slots_used = vif->rx.req_cons - old_req_cons;
+ old_req_cons = queue->rx.req_cons;
+ XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
+ ring_slots_used = queue->rx.req_cons - old_req_cons;
BUG_ON(ring_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
}
- BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
if (!npo.copy_prod)
goto done;
BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
- gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
+ gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
- if ((1 << vif->meta[npo.meta_cons].gso_type) &
- vif->gso_prefix_mask) {
- resp = RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ if ((1 << queue->meta[npo.meta_cons].gso_type) &
+ queue->vif->gso_prefix_mask) {
+ resp = RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
- resp->offset = vif->meta[npo.meta_cons].gso_size;
- resp->id = vif->meta[npo.meta_cons].id;
+ resp->offset = queue->meta[npo.meta_cons].gso_size;
+ resp->id = queue->meta[npo.meta_cons].id;
resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
npo.meta_cons++;
@@ -657,10 +678,10 @@ static void xenvif_rx_action(struct xenvif *vif)
}
- vif->dev->stats.tx_bytes += skb->len;
- vif->dev->stats.tx_packets++;
+ queue->stats.tx_bytes += skb->len;
+ queue->stats.tx_packets++;
- status = xenvif_check_gop(vif,
+ status = xenvif_check_gop(queue->vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
@@ -676,22 +697,22 @@ static void xenvif_rx_action(struct xenvif *vif)
flags |= XEN_NETRXF_data_validated;
offset = 0;
- resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
+ resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
status, offset,
- vif->meta[npo.meta_cons].size,
+ queue->meta[npo.meta_cons].size,
flags);
- if ((1 << vif->meta[npo.meta_cons].gso_type) &
- vif->gso_mask) {
+ if ((1 << queue->meta[npo.meta_cons].gso_type) &
+ queue->vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
+ gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -699,11 +720,11 @@ static void xenvif_rx_action(struct xenvif *vif)
gso->flags = 0;
}
- xenvif_add_frag_responses(vif, status,
- vif->meta + npo.meta_cons + 1,
+ xenvif_add_frag_responses(queue, status,
+ queue->meta + npo.meta_cons + 1,
XENVIF_RX_CB(skb)->meta_slots_used);
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
need_to_notify |= !!ret;
@@ -713,20 +734,20 @@ static void xenvif_rx_action(struct xenvif *vif)
done:
if (need_to_notify)
- notify_remote_via_irq(vif->rx_irq);
+ notify_remote_via_irq(queue->rx_irq);
}
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
int more_to_do;
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+ RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
if (more_to_do)
- napi_schedule(&vif->napi);
+ napi_schedule(&queue->napi);
}
-static void tx_add_credit(struct xenvif *vif)
+static void tx_add_credit(struct xenvif_queue *queue)
{
unsigned long max_burst, max_credit;
@@ -734,55 +755,57 @@ static void tx_add_credit(struct xenvif *vif)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
- max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
+ max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
max_burst = min(max_burst, 131072UL);
- max_burst = max(max_burst, vif->credit_bytes);
+ max_burst = max(max_burst, queue->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
- max_credit = vif->remaining_credit + vif->credit_bytes;
- if (max_credit < vif->remaining_credit)
+ max_credit = queue->remaining_credit + queue->credit_bytes;
+ if (max_credit < queue->remaining_credit)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
- vif->remaining_credit = min(max_credit, max_burst);
+ queue->remaining_credit = min(max_credit, max_burst);
}
static void tx_credit_callback(unsigned long data)
{
- struct xenvif *vif = (struct xenvif *)data;
- tx_add_credit(vif);
- xenvif_napi_schedule_or_enable_events(vif);
+ struct xenvif_queue *queue = (struct xenvif_queue *)data;
+ tx_add_credit(queue);
+ xenvif_napi_schedule_or_enable_events(queue);
}
-static void xenvif_tx_err(struct xenvif *vif,
+static void xenvif_tx_err(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp, RING_IDX end)
{
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
unsigned long flags;
do {
- spin_lock_irqsave(&vif->response_lock, flags);
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- spin_unlock_irqrestore(&vif->response_lock, flags);
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
- txp = RING_GET_REQUEST(&vif->tx, cons++);
+ txp = RING_GET_REQUEST(&queue->tx, cons++);
} while (1);
- vif->tx.req_cons = cons;
+ queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
- xenvif_kick_thread(vif);
+ /* Disable the vif from queue 0's kthread */
+ if (vif->queues)
+ xenvif_kick_thread(&vif->queues[0]);
}
-static int xenvif_count_requests(struct xenvif *vif,
+static int xenvif_count_requests(struct xenvif_queue *queue,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
int work_to_do)
{
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
int slots = 0;
int drop_err = 0;
int more_data;
@@ -794,10 +817,10 @@ static int xenvif_count_requests(struct xenvif *vif,
struct xen_netif_tx_request dropped_tx = { 0 };
if (slots >= work_to_do) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Asked for %d slots but exceeds this limit\n",
work_to_do);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -ENODATA;
}
@@ -805,10 +828,10 @@ static int xenvif_count_requests(struct xenvif *vif,
* considered malicious.
*/
if (unlikely(slots >= fatal_skb_slots)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
slots, fatal_skb_slots);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -E2BIG;
}
@@ -821,7 +844,7 @@ static int xenvif_count_requests(struct xenvif *vif,
*/
if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Too many slots (%d) exceeding limit (%d), dropping packet\n",
slots, XEN_NETBK_LEGACY_SLOTS_MAX);
drop_err = -E2BIG;
@@ -830,7 +853,7 @@ static int xenvif_count_requests(struct xenvif *vif,
if (drop_err)
txp = &dropped_tx;
- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
sizeof(*txp));
/* If the guest submitted a frame >= 64 KiB then
@@ -844,7 +867,7 @@ static int xenvif_count_requests(struct xenvif *vif,
*/
if (!drop_err && txp->size > first->size) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Invalid tx request, slot size %u > remaining size %u\n",
txp->size, first->size);
drop_err = -EIO;
@@ -854,9 +877,9 @@ static int xenvif_count_requests(struct xenvif *vif,
slots++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}
@@ -868,7 +891,7 @@ static int xenvif_count_requests(struct xenvif *vif,
} while (more_data);
if (drop_err) {
- xenvif_tx_err(vif, first, cons + slots);
+ xenvif_tx_err(queue, first, cons + slots);
return drop_err;
}
@@ -882,17 +905,17 @@ struct xenvif_tx_cb {
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
-static inline void xenvif_tx_create_map_op(struct xenvif *vif,
+static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *mop)
{
- vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
- gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
+ queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
+ gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map | GNTMAP_readonly,
- txp->gref, vif->domid);
+ txp->gref, queue->vif->domid);
- memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+ memcpy(&queue->pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
}
@@ -913,7 +936,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *gop)
@@ -940,9 +963,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
- index = pending_index(vif->pending_cons++);
- pending_idx = vif->pending_ring[index];
- xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
}
@@ -950,7 +973,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
struct sk_buff *nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
if (net_ratelimit())
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Can't allocate the frag_list skb.\n");
return NULL;
}
@@ -960,9 +983,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
shinfo->nr_frags++, txp++, gop++) {
- index = pending_index(vif->pending_cons++);
- pending_idx = vif->pending_ring[index];
- xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
}
@@ -973,34 +996,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
return gop;
}
-static inline void xenvif_grant_handle_set(struct xenvif *vif,
+static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
u16 pending_idx,
grant_handle_t handle)
{
- if (unlikely(vif->grant_tx_handle[pending_idx] !=
+ if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Trying to overwrite active handle! pending_idx: %x\n",
pending_idx);
BUG();
}
- vif->grant_tx_handle[pending_idx] = handle;
+ queue->grant_tx_handle[pending_idx] = handle;
}
-static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
u16 pending_idx)
{
- if (unlikely(vif->grant_tx_handle[pending_idx] ==
+ if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Trying to unmap invalid handle! pending_idx: %x\n",
pending_idx);
BUG();
}
- vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
+ queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
-static int xenvif_tx_check_gop(struct xenvif *vif,
+static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct sk_buff *skb,
struct gnttab_map_grant_ref **gopp_map,
struct gnttab_copy **gopp_copy)
@@ -1017,12 +1040,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
(*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
}
check_frags:
@@ -1035,24 +1058,24 @@ check_frags:
newerr = gop_map->status;
if (likely(!newerr)) {
- xenvif_grant_handle_set(vif,
+ xenvif_grant_handle_set(queue,
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
continue;
}
/* Error on this fragment: respond to client with an error. */
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
i,
gop_map->status,
pending_idx,
gop_map->ref);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1060,7 +1083,7 @@ check_frags:
/* First error: invalidate preceding fragments. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -1084,7 +1107,7 @@ check_frags:
shinfo = skb_shinfo(first_skb);
for (j = 0; j < shinfo->nr_frags; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
}
}
@@ -1092,7 +1115,7 @@ check_frags:
return err;
}
-static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
@@ -1110,23 +1133,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
/* If this is not the first frag, chain it to the previous*/
if (prev_pending_idx == INVALID_PENDING_IDX)
skb_shinfo(skb)->destructor_arg =
- &callback_param(vif, pending_idx);
+ &callback_param(queue, pending_idx);
else
- callback_param(vif, prev_pending_idx).ctx =
- &callback_param(vif, pending_idx);
+ callback_param(queue, prev_pending_idx).ctx =
+ &callback_param(queue, pending_idx);
- callback_param(vif, pending_idx).ctx = NULL;
+ callback_param(queue, pending_idx).ctx = NULL;
prev_pending_idx = pending_idx;
- txp = &vif->pending_tx_info[pending_idx].req;
- page = virt_to_page(idx_to_kaddr(vif, pending_idx));
+ txp = &queue->pending_tx_info[pending_idx].req;
+ page = virt_to_page(idx_to_kaddr(queue, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
/* Take an extra reference to offset network stack's put_page */
- get_page(vif->mmap_pages[pending_idx]);
+ get_page(queue->mmap_pages[pending_idx]);
}
/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
* overlaps with "index", and "mapping" is not set. I think mapping
@@ -1136,33 +1159,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->pfmemalloc = false;
}
-static int xenvif_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif_queue *queue,
struct xen_netif_extra_info *extras,
int work_to_do)
{
struct xen_netif_extra_info extra;
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_err(vif->dev, "Missing extra info\n");
- xenvif_fatal_tx_err(vif);
+ netdev_err(queue->vif->dev, "Missing extra info\n");
+ xenvif_fatal_tx_err(queue->vif);
return -EBADR;
}
- memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
+ memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
sizeof(extra));
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
- vif->tx.req_cons = ++cons;
- netdev_err(vif->dev,
+ queue->tx.req_cons = ++cons;
+ netdev_err(queue->vif->dev,
"Invalid extra type: %d\n", extra.type);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
- vif->tx.req_cons = ++cons;
+ queue->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
@@ -1197,7 +1220,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
bool recalculate_partial_csum = false;
@@ -1207,7 +1230,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- vif->rx_gso_checksum_fixup++;
+ queue->stats.rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
@@ -1219,31 +1242,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
return skb_checksum_setup(skb, recalculate_partial_csum);
}
-static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
u64 now = get_jiffies_64();
- u64 next_credit = vif->credit_window_start +
- msecs_to_jiffies(vif->credit_usec / 1000);
+ u64 next_credit = queue->credit_window_start +
+ msecs_to_jiffies(queue->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
- if (timer_pending(&vif->credit_timeout))
+ if (timer_pending(&queue->credit_timeout))
return true;
/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
- vif->credit_window_start = now;
- tx_add_credit(vif);
+ queue->credit_window_start = now;
+ tx_add_credit(queue);
}
/* Still too big to send right now? Set a callback. */
- if (size > vif->remaining_credit) {
- vif->credit_timeout.data =
- (unsigned long)vif;
- vif->credit_timeout.function =
+ if (size > queue->remaining_credit) {
+ queue->credit_timeout.data =
+ (unsigned long)queue;
+ queue->credit_timeout.function =
tx_credit_callback;
- mod_timer(&vif->credit_timeout,
+ mod_timer(&queue->credit_timeout,
next_credit);
- vif->credit_window_start = next_credit;
+ queue->credit_window_start = next_credit;
return true;
}
@@ -1251,16 +1274,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
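tx_add_credit() and tx_credit_exceeded() together implement a token bucket: each credit_usec window refills remaining_credit up to a burst cap, and a packet larger than the balance arms credit_timeout instead of blocking. The same policy collapsed into one simplified sketch (timer re-arming elided):

	/* Simplified restatement of the two functions above: */
	static bool can_send_now(struct xenvif_queue *queue, unsigned int size)
	{
		u64 now = get_jiffies_64();
		u64 next_credit = queue->credit_window_start +
				  msecs_to_jiffies(queue->credit_usec / 1000);

		if (time_after_eq64(now, next_credit)) {
			queue->credit_window_start = now;
			tx_add_credit(queue);	/* refill, capped at burst */
		}
		return size <= queue->remaining_credit;
	}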
-static void xenvif_tx_build_gops(struct xenvif *vif,
+static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while (skb_queue_len(&vif->tx_queue) < budget) {
+ while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1270,69 +1293,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
unsigned int data_len;
pending_ring_idx_t index;
- if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ if (queue->tx.sring->req_prod - queue->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
- vif->tx.sring->req_prod, vif->tx.req_cons,
+ queue->tx.sring->req_prod, queue->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
break;
}
- work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
+ work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
if (!work_to_do)
break;
- idx = vif->tx.req_cons;
+ idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
+ memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
- if (txreq.size > vif->remaining_credit &&
- tx_credit_exceeded(vif, txreq.size))
+ if (txreq.size > queue->remaining_credit &&
+ tx_credit_exceeded(queue, txreq.size))
break;
- vif->remaining_credit -= txreq.size;
+ queue->remaining_credit -= txreq.size;
work_to_do--;
- vif->tx.req_cons = ++idx;
+ queue->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
- work_to_do = xenvif_get_extras(vif, extras,
+ work_to_do = xenvif_get_extras(queue, extras,
work_to_do);
- idx = vif->tx.req_cons;
+ idx = queue->tx.req_cons;
if (unlikely(work_to_do < 0))
break;
}
- ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Bad packet size: %d\n", txreq.size);
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
break;
}
- index = pending_index(vif->pending_cons);
- pending_idx = vif->pending_ring[index];
+ index = pending_index(queue->pending_cons);
+ pending_idx = queue->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1340,9 +1363,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Can't allocate a skb in start_xmit.\n");
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
@@ -1350,7 +1373,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
- if (xenvif_set_skb_gso(vif, skb, gso)) {
+ if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
break;
@@ -1360,18 +1383,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
- vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
- vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
- vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
+ queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+ queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
+ queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
- vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
+ queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
virt_to_mfn(skb->data);
- vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
- vif->tx_copy_ops[*copy_ops].dest.offset =
+ queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+ queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data);
- vif->tx_copy_ops[*copy_ops].len = data_len;
- vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+ queue->tx_copy_ops[*copy_ops].len = data_len;
+ queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
(*copy_ops)++;
@@ -1380,42 +1403,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
- xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
- memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
+ memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
sizeof(txreq));
}
- vif->pending_cons++;
+ queue->pending_cons++;
- request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
+ request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
gop = request_gop;
- __skb_queue_tail(&vif->tx_queue, skb);
+ __skb_queue_tail(&queue->tx_queue, skb);
- vif->tx.req_cons = idx;
+ queue->tx.req_cons = idx;
- if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
- (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
+ if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+ (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}
- (*map_ops) = gop - vif->tx_map_ops;
+ (*map_ops) = gop - queue->tx_map_ops;
return;
}
/* Consolidate skb with a frag_list into a brand new one with local pages on
* frags. Returns 0 or -ENOMEM if it can't allocate new pages.
*/
-static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1423,10 +1446,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
- vif->tx_zerocopy_sent += 2;
- vif->tx_frag_overflow++;
+ queue->stats.tx_zerocopy_sent += 2;
+ queue->stats.tx_frag_overflow++;
- xenvif_fill_frags(vif, nskb);
+ xenvif_fill_frags(queue, nskb);
/* Subtract frags size, we will correct it later */
skb->truesize -= skb->data_len;
skb->len += nskb->len;
@@ -1478,37 +1501,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
return 0;
}
-static int xenvif_tx_submit(struct xenvif *vif)
+static int xenvif_tx_submit(struct xenvif_queue *queue)
{
- struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
- struct gnttab_copy *gop_copy = vif->tx_copy_ops;
+ struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
+ struct gnttab_copy *gop_copy = queue->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
- while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
- txp = &vif->pending_tx_info[pending_idx].req;
+ txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
+ if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}
data_len = skb->len;
- callback_param(vif, pending_idx).ctx = NULL;
+ callback_param(queue, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xenvif_idx_release(vif, pending_idx,
+ xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
@@ -1517,12 +1540,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- xenvif_fill_frags(vif, skb);
+ xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
- if (xenvif_handle_frag_list(vif, skb)) {
+ if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Not enough memory to consolidate frag_list!\n");
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
kfree_skb(skb);
@@ -1535,12 +1558,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
- skb->dev = vif->dev;
+ skb->dev = queue->vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb_reset_network_header(skb);
- if (checksum_setup(vif, skb)) {
- netdev_dbg(vif->dev,
+ if (checksum_setup(queue, skb)) {
+ netdev_dbg(queue->vif->dev,
"Can't setup checksum in net_tx_action\n");
/* We have to set this flag to trigger the callback */
if (skb_shinfo(skb)->destructor_arg)
@@ -1565,8 +1588,8 @@ static int xenvif_tx_submit(struct xenvif *vif)
DIV_ROUND_UP(skb->len - hdrlen, mss);
}
- vif->dev->stats.rx_bytes += skb->len;
- vif->dev->stats.rx_packets++;
+ queue->stats.rx_bytes += skb->len;
+ queue->stats.rx_packets++;
work_done++;
@@ -1577,7 +1600,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
*/
if (skb_shinfo(skb)->destructor_arg) {
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- vif->tx_zerocopy_sent++;
+ queue->stats.tx_zerocopy_sent++;
}
netif_receive_skb(skb);
@@ -1590,47 +1613,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
- struct xenvif *vif = ubuf_to_vif(ubuf);
+ struct xenvif_queue *queue = ubuf_to_queue(ubuf);
/* This is the only place where we grab this lock, to protect callbacks
* from each other.
*/
- spin_lock_irqsave(&vif->callback_lock, flags);
+ spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
ubuf = (struct ubuf_info *) ubuf->ctx;
- BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+ BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
- index = pending_index(vif->dealloc_prod);
- vif->dealloc_ring[index] = pending_idx;
+ index = pending_index(queue->dealloc_prod);
+ queue->dealloc_ring[index] = pending_idx;
/* Sync with xenvif_tx_dealloc_action:
* insert idx then incr producer.
*/
smp_wmb();
- vif->dealloc_prod++;
+ queue->dealloc_prod++;
} while (ubuf);
- wake_up(&vif->dealloc_wq);
- spin_unlock_irqrestore(&vif->callback_lock, flags);
+ wake_up(&queue->dealloc_wq);
+ spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
- vif->tx_zerocopy_success++;
+ queue->stats.tx_zerocopy_success++;
else
- vif->tx_zerocopy_fail++;
+ queue->stats.tx_zerocopy_fail++;
}
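This callback is the producer side of a publish-then-bump protocol; xenvif_tx_dealloc_action() below is the consumer. Schematically:

	/* Barrier pairing between the two sides (single consumer, the
	 * dealloc kthread):
	 *
	 *   producer (this callback)        consumer (dealloc_action)
	 *   ring[idx(prod)] = pending_idx;  dp = prod;
	 *   smp_wmb();                      smp_rmb();
	 *   prod++;                         while (dc != dp)
	 *                                           use(ring[idx(dc++)]);
	 *
	 * The smp_wmb()/smp_rmb() pair ensures a slot's contents are
	 * visible to the consumer before its index is.
	 */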
-static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
pending_ring_idx_t dc, dp;
u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
unsigned int i = 0;
- dc = vif->dealloc_cons;
- gop = vif->tx_unmap_ops;
+ dc = queue->dealloc_cons;
+ gop = queue->tx_unmap_ops;
/* Free up any grants we have finished using */
do {
- dp = vif->dealloc_prod;
+ dp = queue->dealloc_prod;
/* Ensure we see all indices enqueued by all
* xenvif_zerocopy_callback().
@@ -1638,38 +1661,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
smp_rmb();
while (dc != dp) {
- BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
pending_idx =
- vif->dealloc_ring[pending_index(dc++)];
+ queue->dealloc_ring[pending_index(dc++)];
- pending_idx_release[gop-vif->tx_unmap_ops] =
+ pending_idx_release[gop-queue->tx_unmap_ops] =
pending_idx;
- vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
- vif->mmap_pages[pending_idx];
+ queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
- idx_to_kaddr(vif, pending_idx),
+ idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
- vif->grant_tx_handle[pending_idx]);
- xenvif_grant_handle_reset(vif, pending_idx);
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);
++gop;
}
- } while (dp != vif->dealloc_prod);
+ } while (dp != queue->dealloc_prod);
- vif->dealloc_cons = dc;
+ queue->dealloc_cons = dc;
- if (gop - vif->tx_unmap_ops > 0) {
+ if (gop - queue->tx_unmap_ops > 0) {
int ret;
- ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+ ret = gnttab_unmap_refs(queue->tx_unmap_ops,
NULL,
- vif->pages_to_unmap,
- gop - vif->tx_unmap_ops);
+ queue->pages_to_unmap,
+ gop - queue->tx_unmap_ops);
if (ret) {
- netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
- gop - vif->tx_unmap_ops, ret);
- for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ gop - queue->tx_unmap_ops, ret);
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
" host_addr: %llx handle: %x status: %d\n",
gop[i].host_addr,
gop[i].handle,
@@ -1679,91 +1702,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
}
}
- for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
- xenvif_idx_release(vif, pending_idx_release[i],
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
+ xenvif_idx_release(queue, pending_idx_release[i],
XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
-int xenvif_tx_action(struct xenvif *vif, int budget)
+int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
unsigned nr_mops, nr_cops = 0;
int work_done, ret;
- if (unlikely(!tx_work_todo(vif)))
+ if (unlikely(!tx_work_todo(queue)))
return 0;
- xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);
+ xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
if (nr_cops == 0)
return 0;
- gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
+ gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
if (nr_mops != 0) {
- ret = gnttab_map_refs(vif->tx_map_ops,
+ ret = gnttab_map_refs(queue->tx_map_ops,
NULL,
- vif->pages_to_map,
+ queue->pages_to_map,
nr_mops);
BUG_ON(ret);
}
- work_done = xenvif_tx_submit(vif);
+ work_done = xenvif_tx_submit(queue);
return work_done;
}
-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
unsigned long flags;
- pending_tx_info = &vif->pending_tx_info[pending_idx];
- spin_lock_irqsave(&vif->response_lock, flags);
- make_tx_response(vif, &pending_tx_info->req, status);
- index = pending_index(vif->pending_prod);
- vif->pending_ring[index] = pending_idx;
+ pending_tx_info = &queue->pending_tx_info[pending_idx];
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, &pending_tx_info->req, status);
+ index = pending_index(queue->pending_prod);
+ queue->pending_ring[index] = pending_idx;
/* TX shouldn't use the index before we give it back here */
mb();
- vif->pending_prod++;
- spin_unlock_irqrestore(&vif->response_lock, flags);
+ queue->pending_prod++;
+ spin_unlock_irqrestore(&queue->response_lock, flags);
}
-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st)
{
- RING_IDX i = vif->tx.rsp_prod_pvt;
+ RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
int notify;
- resp = RING_GET_RESPONSE(&vif->tx, i);
+ resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
resp->status = st;
if (txp->flags & XEN_NETTXF_extra_info)
- RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
- vif->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
+ queue->tx.rsp_prod_pvt = ++i;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
- notify_remote_via_irq(vif->tx_irq);
+ notify_remote_via_irq(queue->tx_irq);
}
-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags)
{
- RING_IDX i = vif->rx.rsp_prod_pvt;
+ RING_IDX i = queue->rx.rsp_prod_pvt;
struct xen_netif_rx_response *resp;
- resp = RING_GET_RESPONSE(&vif->rx, i);
+ resp = RING_GET_RESPONSE(&queue->rx, i);
resp->offset = offset;
resp->flags = flags;
resp->id = id;
@@ -1771,26 +1794,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
if (st < 0)
resp->status = (s16)st;
- vif->rx.rsp_prod_pvt = ++i;
+ queue->rx.rsp_prod_pvt = ++i;
return resp;
}
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;
gnttab_set_unmap_op(&tx_unmap_op,
- idx_to_kaddr(vif, pending_idx),
+ idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
- vif->grant_tx_handle[pending_idx]);
- xenvif_grant_handle_reset(vif, pending_idx);
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);
ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
- &vif->mmap_pages[pending_idx], 1);
+ &queue->mmap_pages[pending_idx], 1);
if (ret) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
ret,
pending_idx,
@@ -1800,41 +1823,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
BUG();
}
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}
-static inline int rx_work_todo(struct xenvif *vif)
+static inline int rx_work_todo(struct xenvif_queue *queue)
{
- return (!skb_queue_empty(&vif->rx_queue) &&
- xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
- vif->rx_queue_purge;
+ return (!skb_queue_empty(&queue->rx_queue) &&
+ xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
+ queue->rx_queue_purge;
}
-static inline int tx_work_todo(struct xenvif *vif)
+static inline int tx_work_todo(struct xenvif_queue *queue)
{
-
- if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
return 1;
return 0;
}
-static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
- return vif->dealloc_cons != vif->dealloc_prod;
+ return queue->dealloc_cons != queue->dealloc_prod;
}
-void xenvif_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
- if (vif->tx.sring)
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->tx.sring);
- if (vif->rx.sring)
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->rx.sring);
+ if (queue->tx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+ queue->tx.sring);
+ if (queue->rx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+ queue->rx.sring);
}
-int xenvif_map_frontend_rings(struct xenvif *vif,
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
@@ -1844,85 +1866,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
int err = -ENOMEM;
- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
tx_ring_ref, &addr);
if (err)
goto err;
txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
rx_ring_ref, &addr);
if (err)
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
return 0;
err:
- xenvif_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(queue);
return err;
}
-void xenvif_stop_queue(struct xenvif *vif)
+static void xenvif_start_queue(struct xenvif_queue *queue)
{
- if (!vif->can_queue)
- return;
-
- netif_stop_queue(vif->dev);
-}
-
-static void xenvif_start_queue(struct xenvif *vif)
-{
- if (xenvif_schedulable(vif))
- netif_wake_queue(vif->dev);
+ if (xenvif_schedulable(queue->vif))
+ xenvif_wake_queue(queue);
}
int xenvif_kthread_guest_rx(void *data)
{
- struct xenvif *vif = data;
+ struct xenvif_queue *queue = data;
struct sk_buff *skb;
while (!kthread_should_stop()) {
- wait_event_interruptible(vif->wq,
- rx_work_todo(vif) ||
- vif->disabled ||
+ wait_event_interruptible(queue->wq,
+ rx_work_todo(queue) ||
+ queue->vif->disabled ||
kthread_should_stop());
/* This frontend is found to be rogue; disable it in
* kthread context. Currently this is only set when
* netback finds that the frontend sent a malformed packet,
* but we cannot disable the interface in softirq
- * context so we defer it here.
+ * context so we defer it here, if this thread is
+ * associated with queue 0.
*/
- if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
- xenvif_carrier_off(vif);
+ if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
+ xenvif_carrier_off(queue->vif);
if (kthread_should_stop())
break;
- if (vif->rx_queue_purge) {
- skb_queue_purge(&vif->rx_queue);
- vif->rx_queue_purge = false;
+ if (queue->rx_queue_purge) {
+ skb_queue_purge(&queue->rx_queue);
+ queue->rx_queue_purge = false;
}
- if (!skb_queue_empty(&vif->rx_queue))
- xenvif_rx_action(vif);
+ if (!skb_queue_empty(&queue->rx_queue))
+ xenvif_rx_action(queue);
- if (skb_queue_empty(&vif->rx_queue) &&
- netif_queue_stopped(vif->dev)) {
- del_timer_sync(&vif->wake_queue);
- xenvif_start_queue(vif);
+ if (skb_queue_empty(&queue->rx_queue) &&
+ xenvif_queue_stopped(queue)) {
+ del_timer_sync(&queue->wake_queue);
+ xenvif_start_queue(queue);
}
cond_resched();
}
/* Bin any remaining skbs */
- while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
dev_kfree_skb(skb);
return 0;
@@ -1930,22 +1945,22 @@ int xenvif_kthread_guest_rx(void *data)
int xenvif_dealloc_kthread(void *data)
{
- struct xenvif *vif = data;
+ struct xenvif_queue *queue = data;
while (!kthread_should_stop()) {
- wait_event_interruptible(vif->dealloc_wq,
- tx_dealloc_work_todo(vif) ||
+ wait_event_interruptible(queue->dealloc_wq,
+ tx_dealloc_work_todo(queue) ||
kthread_should_stop());
if (kthread_should_stop())
break;
- xenvif_tx_dealloc_action(vif);
+ xenvif_tx_dealloc_action(queue);
cond_resched();
}
/* Unmap anything remaining */
- if (tx_dealloc_work_todo(vif))
- xenvif_tx_dealloc_action(vif);
+ if (tx_dealloc_work_todo(queue))
+ xenvif_tx_dealloc_action(queue);
return 0;
}
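
The dealloc ring above is a single-producer/single-consumer pair: xenvif_zerocopy_callback() publishes pending indices and wakes the kthread, which drains them in xenvif_tx_dealloc_action(). A minimal sketch of the barrier pairing, distilled from the hunks above (names as in the patch; unmap() stands in for the gnttab unmap sequence and is illustrative only):

    /* producer -- zerocopy callback, under callback_lock */
    queue->dealloc_ring[pending_index(queue->dealloc_prod)] = pending_idx;
    smp_wmb();             /* publish the slot ...                 */
    queue->dealloc_prod++; /* ... before advancing the producer    */
    wake_up(&queue->dealloc_wq);

    /* consumer -- xenvif_dealloc_kthread via xenvif_tx_dealloc_action */
    dc = queue->dealloc_cons;
    dp = queue->dealloc_prod;
    smp_rmb();             /* see the ring entries before reading them */
    while (dc != dp)
        unmap(queue->dealloc_ring[pending_index(dc++)]);
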
@@ -1957,6 +1972,9 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
+ /* Allow as many queues as there are CPUs, by default */
+ xenvif_max_queues = num_online_cpus();
+
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
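
Netback now defaults to one queue per online CPU; the xenvif_max_queues variable that netback_init() initialises here acts as the module-wide cap. Its declaration is outside this excerpt, but it presumably mirrors the netfront parameter shown further below, roughly:

    /* Sketch, assuming netback mirrors netfront's module parameter
     * (the declaration is not part of this excerpt):
     */
    unsigned int xenvif_max_queues;
    module_param_named(max_queues, xenvif_max_queues, uint, 0644);
    MODULE_PARM_DESC(max_queues,
                     "Maximum number of queues per virtual interface");
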
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 7a206cffb062..3d85acd84bad 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -19,6 +19,8 @@
*/
#include "common.h"
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
struct backend_info {
struct xenbus_device *dev;
@@ -34,8 +36,9 @@ struct backend_info {
u8 have_hotplug_status_watch:1;
};
-static int connect_rings(struct backend_info *);
-static void connect(struct backend_info *);
+static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static void connect(struct backend_info *be);
+static int read_xenbus_vif_flags(struct backend_info *be);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be,
@@ -157,6 +160,12 @@ static int netback_probe(struct xenbus_device *dev,
if (err)
pr_debug("Error writing feature-split-event-channels\n");
+ /* Multi-queue support: This is an optional feature. */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "multi-queue-max-queues", "%u", xenvif_max_queues);
+ if (err)
+ pr_debug("Error writing multi-queue-max-queues\n");
+
err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err)
goto fail;
@@ -485,10 +494,26 @@ static void connect(struct backend_info *be)
{
int err;
struct xenbus_device *dev = be->dev;
+ unsigned long credit_bytes, credit_usec;
+ unsigned int queue_index;
+ unsigned int requested_num_queues;
+ struct xenvif_queue *queue;
- err = connect_rings(be);
- if (err)
+ /* Check whether the frontend requested multiple queues
+ * and read the number requested.
+ */
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "multi-queue-num-queues",
+ "%u", &requested_num_queues);
+ if (err < 0) {
+ requested_num_queues = 1; /* Fall back to single queue */
+ } else if (requested_num_queues > xenvif_max_queues) {
+ /* buggy or malicious guest */
+ xenbus_dev_fatal(dev, err,
+ "guest requested %u queues, exceeding the maximum of %u.",
+ requested_num_queues, xenvif_max_queues);
return;
+ }
err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
if (err) {
@@ -496,9 +521,56 @@ static void connect(struct backend_info *be)
return;
}
- xen_net_read_rate(dev, &be->vif->credit_bytes,
- &be->vif->credit_usec);
- be->vif->remaining_credit = be->vif->credit_bytes;
+ xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+ read_xenbus_vif_flags(be);
+
+ /* Use the number of queues requested by the frontend */
+ be->vif->queues = vzalloc(requested_num_queues *
+ sizeof(struct xenvif_queue));
+ be->vif->num_queues = requested_num_queues;
+
+ for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
+ queue = &be->vif->queues[queue_index];
+ queue->vif = be->vif;
+ queue->id = queue_index;
+ snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+ be->vif->dev->name, queue->id);
+
+ err = xenvif_init_queue(queue);
+ if (err) {
+ /* xenvif_init_queue() cleans up after itself on
+ * failure, but we need to clean up any previously
+ * initialised queues. Set num_queues to queue_index so
+ * that earlier queues can be destroyed using the regular
+ * disconnect logic.
+ */
+ be->vif->num_queues = queue_index;
+ goto err;
+ }
+
+ queue->remaining_credit = credit_bytes;
+
+ err = connect_rings(be, queue);
+ if (err) {
+ /* connect_rings() cleans up after itself on failure,
+ * but we need to clean up after xenvif_init_queue() here,
+ * and also clean up any previously initialised queues.
+ */
+ xenvif_deinit_queue(queue);
+ be->vif->num_queues = queue_index;
+ goto err;
+ }
+ }
+
+ /* Initialisation completed, tell core driver the number of
+ * active queues.
+ */
+ rtnl_lock();
+ netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+ netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
+ rtnl_unlock();
+
+ xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
@@ -507,45 +579,107 @@ static void connect(struct backend_info *be)
if (!err)
be->have_hotplug_status_watch = 1;
- netif_wake_queue(be->vif->dev);
+ netif_tx_wake_all_queues(be->vif->dev);
+
+ return;
+
+err:
+ if (be->vif->num_queues > 0)
+ xenvif_disconnect(be->vif); /* Clean up existing queues */
+ vfree(be->vif->queues);
+ be->vif->queues = NULL;
+ be->vif->num_queues = 0;
+ return;
}
-static int connect_rings(struct backend_info *be)
+static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
{
- struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev;
+ unsigned int num_queues = queue->vif->num_queues;
unsigned long tx_ring_ref, rx_ring_ref;
- unsigned int tx_evtchn, rx_evtchn, rx_copy;
+ unsigned int tx_evtchn, rx_evtchn;
int err;
- int val;
+ char *xspath;
+ size_t xspathsize;
+ const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
+
+ /* If the frontend requested 1 queue, or we have fallen back
+ * to single queue due to lack of frontend support for multi-
+ * queue, expect the remaining XenStore keys in the toplevel
+ * directory. Otherwise, expect them in a subdirectory called
+ * queue-N.
+ */
+ if (num_queues == 1) {
+ xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
+ if (!xspath) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "reading ring references");
+ return -ENOMEM;
+ }
+ strcpy(xspath, dev->otherend);
+ } else {
+ xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
+ xspath = kzalloc(xspathsize, GFP_KERNEL);
+ if (!xspath) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "reading ring references");
+ return -ENOMEM;
+ }
+ snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
+ queue->id);
+ }
- err = xenbus_gather(XBT_NIL, dev->otherend,
+ err = xenbus_gather(XBT_NIL, xspath,
"tx-ring-ref", "%lu", &tx_ring_ref,
"rx-ring-ref", "%lu", &rx_ring_ref, NULL);
if (err) {
xenbus_dev_fatal(dev, err,
"reading %s/ring-ref",
- dev->otherend);
- return err;
+ xspath);
+ goto err;
}
/* Try split event channels first, then single event channel. */
- err = xenbus_gather(XBT_NIL, dev->otherend,
+ err = xenbus_gather(XBT_NIL, xspath,
"event-channel-tx", "%u", &tx_evtchn,
"event-channel-rx", "%u", &rx_evtchn, NULL);
if (err < 0) {
- err = xenbus_scanf(XBT_NIL, dev->otherend,
+ err = xenbus_scanf(XBT_NIL, xspath,
"event-channel", "%u", &tx_evtchn);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"reading %s/event-channel(-tx/rx)",
- dev->otherend);
- return err;
+ xspath);
+ goto err;
}
rx_evtchn = tx_evtchn;
}
+ /* Map the shared frame, irq etc. */
+ err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
+ tx_evtchn, rx_evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "mapping shared-frames %lu/%lu port tx %u rx %u",
+ tx_ring_ref, rx_ring_ref,
+ tx_evtchn, rx_evtchn);
+ goto err;
+ }
+
+ err = 0;
+err: /* Regular return falls through with err == 0 */
+ kfree(xspath);
+ return err;
+}
+
+static int read_xenbus_vif_flags(struct backend_info *be)
+{
+ struct xenvif *vif = be->vif;
+ struct xenbus_device *dev = be->dev;
+ unsigned int rx_copy;
+ int err, val;
+
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy);
if (err == -ENOENT) {
@@ -621,16 +755,6 @@ static int connect_rings(struct backend_info *be)
val = 0;
vif->ipv6_csum = !!val;
- /* Map the shared frame, irq etc. */
- err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
- tx_evtchn, rx_evtchn);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "mapping shared-frames %lu/%lu port tx %u rx %u",
- tx_ring_ref, rx_ring_ref,
- tx_evtchn, rx_evtchn);
- return err;
- }
return 0;
}
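
Taken together, the xenbus changes amount to a small negotiation protocol: the backend advertises "multi-queue-max-queues" at probe time, the frontend answers with "multi-queue-num-queues", and for more than one queue each queue keeps its ring references and event channels in its own queue-N subdirectory, which connect_rings() now reads per queue. An illustrative XenStore layout for a two-queue vif (paths abbreviated, values invented for the example):

    backend:   .../multi-queue-max-queues  = "4"
    frontend:  .../multi-queue-num-queues  = "2"
               .../queue-0/tx-ring-ref       .../queue-0/rx-ring-ref
               .../queue-0/event-channel-tx  .../queue-0/event-channel-rx
               .../queue-1/tx-ring-ref       .../queue-1/rx-ring-ref
               .../queue-1/event-channel-tx  .../queue-1/event-channel-rx

With a single queue (or a frontend that does not understand multi-queue) the same keys stay in the top-level directory, and a lone "event-channel" key replaces the split tx/rx pair when split event channels are unavailable -- exactly the fallbacks connect_rings() implements above.
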
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 158b5e639fc7..2ccb4a02368b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,12 @@
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
+/* Module parameters */
+static unsigned int xennet_max_queues;
+module_param_named(max_queues, xennet_max_queues, uint, 0644);
+MODULE_PARM_DESC(max_queues,
+ "Maximum number of queues per virtual interface");
+
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
@@ -73,6 +79,12 @@ struct netfront_cb {
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
+
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
struct netfront_stats {
u64 rx_packets;
u64 tx_packets;
@@ -81,9 +93,12 @@ struct netfront_stats {
struct u64_stats_sync syncp;
};
-struct netfront_info {
- struct list_head list;
- struct net_device *netdev;
+struct netfront_info;
+
+struct netfront_queue {
+ unsigned int id; /* Queue ID, 0-based */
+ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+ struct netfront_info *info;
struct napi_struct napi;
@@ -93,10 +108,8 @@ struct netfront_info {
unsigned int tx_evtchn, rx_evtchn;
unsigned int tx_irq, rx_irq;
/* Only used when split event channels support is enabled */
- char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
- char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
-
- struct xenbus_device *xbdev;
+ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
+ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
spinlock_t tx_lock;
struct xen_netif_tx_front_ring tx;
@@ -140,11 +153,21 @@ struct netfront_info {
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_info {
+ struct list_head list;
+ struct net_device *netdev;
+
+ struct xenbus_device *xbdev;
+
+ /* Multi-queue support */
+ struct netfront_queue *queues;
/* Statistics */
struct netfront_stats __percpu *stats;
- unsigned long rx_gso_checksum_fixup;
+ atomic_t rx_gso_checksum_fixup;
};
struct netfront_rx_info {
@@ -187,21 +210,21 @@ static int xennet_rxidx(RING_IDX idx)
return idx & (NET_RX_RING_SIZE - 1);
}
-static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
RING_IDX ri)
{
int i = xennet_rxidx(ri);
- struct sk_buff *skb = np->rx_skbs[i];
- np->rx_skbs[i] = NULL;
+ struct sk_buff *skb = queue->rx_skbs[i];
+ queue->rx_skbs[i] = NULL;
return skb;
}
-static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
RING_IDX ri)
{
int i = xennet_rxidx(ri);
- grant_ref_t ref = np->grant_rx_ref[i];
- np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ grant_ref_t ref = queue->grant_rx_ref[i];
+ queue->grant_rx_ref[i] = GRANT_INVALID_REF;
return ref;
}
@@ -221,41 +244,40 @@ static bool xennet_can_sg(struct net_device *dev)
static void rx_refill_timeout(unsigned long data)
{
- struct net_device *dev = (struct net_device *)data;
- struct netfront_info *np = netdev_priv(dev);
- napi_schedule(&np->napi);
+ struct netfront_queue *queue = (struct netfront_queue *)data;
+ napi_schedule(&queue->napi);
}
-static int netfront_tx_slot_available(struct netfront_info *np)
+static int netfront_tx_slot_available(struct netfront_queue *queue)
{
- return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
-static void xennet_maybe_wake_tx(struct net_device *dev)
+static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
- struct netfront_info *np = netdev_priv(dev);
+ struct net_device *dev = queue->info->netdev;
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
- if (unlikely(netif_queue_stopped(dev)) &&
- netfront_tx_slot_available(np) &&
+ if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
+ netfront_tx_slot_available(queue) &&
likely(netif_running(dev)))
- netif_wake_queue(dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
-static void xennet_alloc_rx_buffers(struct net_device *dev)
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
unsigned short id;
- struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
struct page *page;
int i, batch_target, notify;
- RING_IDX req_prod = np->rx.req_prod_pvt;
+ RING_IDX req_prod = queue->rx.req_prod_pvt;
grant_ref_t ref;
unsigned long pfn;
void *vaddr;
struct xen_netif_rx_request *req;
- if (unlikely(!netif_carrier_ok(dev)))
+ if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
/*
@@ -264,9 +286,10 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
* allocator, so should reduce the chance of failed allocation requests
* both for ourselves and for other kernel subsystems.
*/
- batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
- for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
+ batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
+ for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
+ skb = __netdev_alloc_skb(queue->info->netdev,
+ RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
@@ -279,7 +302,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
kfree_skb(skb);
no_skb:
/* Could not allocate any skbuffs. Try again later. */
- mod_timer(&np->rx_refill_timer,
+ mod_timer(&queue->rx_refill_timer,
jiffies + (HZ/10));
/* Any skbuffs queued for refill? Force them out. */
@@ -289,44 +312,44 @@ no_skb:
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
- __skb_queue_tail(&np->rx_batch, skb);
+ __skb_queue_tail(&queue->rx_batch, skb);
}
/* Is the batch large enough to be worthwhile? */
- if (i < (np->rx_target/2)) {
- if (req_prod > np->rx.sring->req_prod)
+ if (i < (queue->rx_target/2)) {
+ if (req_prod > queue->rx.sring->req_prod)
goto push;
return;
}
/* Adjust our fill target if we risked running out of buffers. */
- if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
- ((np->rx_target *= 2) > np->rx_max_target))
- np->rx_target = np->rx_max_target;
+ if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
+ ((queue->rx_target *= 2) > queue->rx_max_target))
+ queue->rx_target = queue->rx_max_target;
refill:
for (i = 0; ; i++) {
- skb = __skb_dequeue(&np->rx_batch);
+ skb = __skb_dequeue(&queue->rx_batch);
if (skb == NULL)
break;
- skb->dev = dev;
+ skb->dev = queue->info->netdev;
id = xennet_rxidx(req_prod + i);
- BUG_ON(np->rx_skbs[id]);
- np->rx_skbs[id] = skb;
+ BUG_ON(queue->rx_skbs[id]);
+ queue->rx_skbs[id] = skb;
- ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
BUG_ON((signed short)ref < 0);
- np->grant_rx_ref[id] = ref;
+ queue->grant_rx_ref[id] = ref;
pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
- req = RING_GET_REQUEST(&np->rx, req_prod + i);
+ req = RING_GET_REQUEST(&queue->rx, req_prod + i);
gnttab_grant_foreign_access_ref(ref,
- np->xbdev->otherend_id,
+ queue->info->xbdev->otherend_id,
pfn_to_mfn(pfn),
0);
@@ -337,72 +360,77 @@ no_skb:
wmb(); /* barrier so backend sees requests */
/* Above is a suitable barrier to ensure backend will see requests. */
- np->rx.req_prod_pvt = req_prod + i;
+ queue->rx.req_prod_pvt = req_prod + i;
push:
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
- notify_remote_via_irq(np->rx_irq);
+ notify_remote_via_irq(queue->rx_irq);
}
static int xennet_open(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
-
- napi_enable(&np->napi);
-
- spin_lock_bh(&np->rx_lock);
- if (netif_carrier_ok(dev)) {
- xennet_alloc_rx_buffers(dev);
- np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
- if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- napi_schedule(&np->napi);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i = 0;
+ struct netfront_queue *queue = NULL;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ napi_enable(&queue->napi);
+
+ spin_lock_bh(&queue->rx_lock);
+ if (netif_carrier_ok(dev)) {
+ xennet_alloc_rx_buffers(queue);
+ queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
+ if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
+ napi_schedule(&queue->napi);
+ }
+ spin_unlock_bh(&queue->rx_lock);
}
- spin_unlock_bh(&np->rx_lock);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
-static void xennet_tx_buf_gc(struct net_device *dev)
+static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
RING_IDX cons, prod;
unsigned short id;
- struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
- BUG_ON(!netif_carrier_ok(dev));
+ BUG_ON(!netif_carrier_ok(queue->info->netdev));
do {
- prod = np->tx.sring->rsp_prod;
+ prod = queue->tx.sring->rsp_prod;
rmb(); /* Ensure we see responses up to 'rp'. */
- for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+ for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
struct xen_netif_tx_response *txrsp;
- txrsp = RING_GET_RESPONSE(&np->tx, cons);
+ txrsp = RING_GET_RESPONSE(&queue->tx, cons);
if (txrsp->status == XEN_NETIF_RSP_NULL)
continue;
id = txrsp->id;
- skb = np->tx_skbs[id].skb;
+ skb = queue->tx_skbs[id].skb;
if (unlikely(gnttab_query_foreign_access(
- np->grant_tx_ref[id]) != 0)) {
+ queue->grant_tx_ref[id]) != 0)) {
pr_alert("%s: warning -- grant still in use by backend domain\n",
__func__);
BUG();
}
gnttab_end_foreign_access_ref(
- np->grant_tx_ref[id], GNTMAP_readonly);
+ queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
- &np->gref_tx_head, np->grant_tx_ref[id]);
- np->grant_tx_ref[id] = GRANT_INVALID_REF;
- np->grant_tx_page[id] = NULL;
- add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+ &queue->gref_tx_head, queue->grant_tx_ref[id]);
+ queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_tx_page[id] = NULL;
+ add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
dev_kfree_skb_irq(skb);
}
- np->tx.rsp_cons = prod;
+ queue->tx.rsp_cons = prod;
/*
* Set a new event, then check for race with update of tx_cons.
@@ -412,21 +440,20 @@ static void xennet_tx_buf_gc(struct net_device *dev)
* data is outstanding: in such cases notification from Xen is
* likely to be the only kick that we'll get.
*/
- np->tx.sring->rsp_event =
- prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+ queue->tx.sring->rsp_event =
+ prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
mb(); /* update shared area */
- } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+ } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
- xennet_maybe_wake_tx(dev);
+ xennet_maybe_wake_tx(queue);
}
-static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
struct xen_netif_tx_request *tx)
{
- struct netfront_info *np = netdev_priv(dev);
char *data = skb->data;
unsigned long mfn;
- RING_IDX prod = np->tx.req_prod_pvt;
+ RING_IDX prod = queue->tx.req_prod_pvt;
int frags = skb_shinfo(skb)->nr_frags;
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
@@ -443,19 +470,19 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
data += tx->size;
offset = 0;
- id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
- np->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&np->tx, prod++);
+ id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ queue->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&queue->tx, prod++);
tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(data);
- gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
mfn, GNTMAP_readonly);
- np->grant_tx_page[id] = virt_to_page(data);
- tx->gref = np->grant_tx_ref[id] = ref;
+ queue->grant_tx_page[id] = virt_to_page(data);
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
tx->flags = 0;
@@ -487,21 +514,21 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
tx->flags |= XEN_NETTXF_more_data;
- id = get_id_from_freelist(&np->tx_skb_freelist,
- np->tx_skbs);
- np->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&np->tx, prod++);
+ id = get_id_from_freelist(&queue->tx_skb_freelist,
+ queue->tx_skbs);
+ queue->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&queue->tx, prod++);
tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
- np->xbdev->otherend_id,
+ queue->info->xbdev->otherend_id,
mfn, GNTMAP_readonly);
- np->grant_tx_page[id] = page;
- tx->gref = np->grant_tx_ref[id] = ref;
+ queue->grant_tx_page[id] = page;
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = bytes;
tx->flags = 0;
@@ -518,7 +545,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
}
}
- np->tx.req_prod_pvt = prod;
+ queue->tx.req_prod_pvt = prod;
}
/*
@@ -544,6 +571,24 @@ static int xennet_count_skb_frag_slots(struct sk_buff *skb)
return pages;
}
+static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ unsigned int num_queues = dev->real_num_tx_queues;
+ u32 hash;
+ u16 queue_idx;
+
+ /* First, check if there is only one queue */
+ if (num_queues == 1) {
+ queue_idx = 0;
+ } else {
+ hash = skb_get_hash(skb);
+ queue_idx = hash % num_queues;
+ }
+
+ return queue_idx;
+}
+
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
@@ -559,6 +604,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
unsigned long flags;
+ struct netfront_queue *queue = NULL;
+ unsigned int num_queues = dev->real_num_tx_queues;
+ u16 queue_index;
+
+ /* Drop the packet if no queues are set up */
+ if (num_queues < 1)
+ goto drop;
+ /* Determine which queue to transmit this SKB on */
+ queue_index = skb_get_queue_mapping(skb);
+ queue = &np->queues[queue_index];
/* If skb->len is too big for wire format, drop skb and alert
* user about misconfiguration.
@@ -578,30 +633,30 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- spin_lock_irqsave(&np->tx_lock, flags);
+ spin_lock_irqsave(&queue->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
(slots > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
- spin_unlock_irqrestore(&np->tx_lock, flags);
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
goto drop;
}
- i = np->tx.req_prod_pvt;
+ i = queue->tx.req_prod_pvt;
- id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
- np->tx_skbs[id].skb = skb;
+ id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ queue->tx_skbs[id].skb = skb;
- tx = RING_GET_REQUEST(&np->tx, i);
+ tx = RING_GET_REQUEST(&queue->tx, i);
tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
- ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
- np->grant_tx_page[id] = virt_to_page(data);
- tx->gref = np->grant_tx_ref[id] = ref;
+ ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ queue->grant_tx_page[id] = virt_to_page(data);
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -617,7 +672,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct xen_netif_extra_info *gso;
gso = (struct xen_netif_extra_info *)
- RING_GET_REQUEST(&np->tx, ++i);
+ RING_GET_REQUEST(&queue->tx, ++i);
tx->flags |= XEN_NETTXF_extra_info;
@@ -632,14 +687,14 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso->flags = 0;
}
- np->tx.req_prod_pvt = i + 1;
+ queue->tx.req_prod_pvt = i + 1;
- xennet_make_frags(skb, dev, tx);
+ xennet_make_frags(skb, queue, tx);
tx->size = skb->len;
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
- notify_remote_via_irq(np->tx_irq);
+ notify_remote_via_irq(queue->tx_irq);
u64_stats_update_begin(&stats->syncp);
stats->tx_bytes += skb->len;
@@ -647,12 +702,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_end(&stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
- xennet_tx_buf_gc(dev);
+ xennet_tx_buf_gc(queue);
- if (!netfront_tx_slot_available(np))
- netif_stop_queue(dev);
+ if (!netfront_tx_slot_available(queue))
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
- spin_unlock_irqrestore(&np->tx_lock, flags);
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
return NETDEV_TX_OK;
@@ -665,32 +720,38 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int xennet_close(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
- netif_stop_queue(np->netdev);
- napi_disable(&np->napi);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i;
+ struct netfront_queue *queue;
+ netif_tx_stop_all_queues(np->netdev);
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ napi_disable(&queue->napi);
+ }
return 0;
}
-static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
grant_ref_t ref)
{
- int new = xennet_rxidx(np->rx.req_prod_pvt);
-
- BUG_ON(np->rx_skbs[new]);
- np->rx_skbs[new] = skb;
- np->grant_rx_ref[new] = ref;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
- np->rx.req_prod_pvt++;
+ int new = xennet_rxidx(queue->rx.req_prod_pvt);
+
+ BUG_ON(queue->rx_skbs[new]);
+ queue->rx_skbs[new] = skb;
+ queue->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
+ queue->rx.req_prod_pvt++;
}
-static int xennet_get_extras(struct netfront_info *np,
+static int xennet_get_extras(struct netfront_queue *queue,
struct xen_netif_extra_info *extras,
RING_IDX rp)
{
struct xen_netif_extra_info *extra;
- struct device *dev = &np->netdev->dev;
- RING_IDX cons = np->rx.rsp_cons;
+ struct device *dev = &queue->info->netdev->dev;
+ RING_IDX cons = queue->rx.rsp_cons;
int err = 0;
do {
@@ -705,7 +766,7 @@ static int xennet_get_extras(struct netfront_info *np,
}
extra = (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&np->rx, ++cons);
+ RING_GET_RESPONSE(&queue->rx, ++cons);
if (unlikely(!extra->type ||
extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
@@ -718,33 +779,33 @@ static int xennet_get_extras(struct netfront_info *np,
sizeof(*extra));
}
- skb = xennet_get_rx_skb(np, cons);
- ref = xennet_get_rx_ref(np, cons);
- xennet_move_rx_slot(np, skb, ref);
+ skb = xennet_get_rx_skb(queue, cons);
+ ref = xennet_get_rx_ref(queue, cons);
+ xennet_move_rx_slot(queue, skb, ref);
} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
- np->rx.rsp_cons = cons;
+ queue->rx.rsp_cons = cons;
return err;
}
-static int xennet_get_responses(struct netfront_info *np,
+static int xennet_get_responses(struct netfront_queue *queue,
struct netfront_rx_info *rinfo, RING_IDX rp,
struct sk_buff_head *list)
{
struct xen_netif_rx_response *rx = &rinfo->rx;
struct xen_netif_extra_info *extras = rinfo->extras;
- struct device *dev = &np->netdev->dev;
- RING_IDX cons = np->rx.rsp_cons;
- struct sk_buff *skb = xennet_get_rx_skb(np, cons);
- grant_ref_t ref = xennet_get_rx_ref(np, cons);
+ struct device *dev = &queue->info->netdev->dev;
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
+ grant_ref_t ref = xennet_get_rx_ref(queue, cons);
int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
int slots = 1;
int err = 0;
unsigned long ret;
if (rx->flags & XEN_NETRXF_extra_info) {
- err = xennet_get_extras(np, extras, rp);
- cons = np->rx.rsp_cons;
+ err = xennet_get_extras(queue, extras, rp);
+ cons = queue->rx.rsp_cons;
}
for (;;) {
@@ -753,7 +814,7 @@ static int xennet_get_responses(struct netfront_info *np,
if (net_ratelimit())
dev_warn(dev, "rx->offset: %x, size: %u\n",
rx->offset, rx->status);
- xennet_move_rx_slot(np, skb, ref);
+ xennet_move_rx_slot(queue, skb, ref);
err = -EINVAL;
goto next;
}
@@ -774,7 +835,7 @@ static int xennet_get_responses(struct netfront_info *np,
ret = gnttab_end_foreign_access_ref(ref, 0);
BUG_ON(!ret);
- gnttab_release_grant_reference(&np->gref_rx_head, ref);
+ gnttab_release_grant_reference(&queue->gref_rx_head, ref);
__skb_queue_tail(list, skb);
@@ -789,9 +850,9 @@ next:
break;
}
- rx = RING_GET_RESPONSE(&np->rx, cons + slots);
- skb = xennet_get_rx_skb(np, cons + slots);
- ref = xennet_get_rx_ref(np, cons + slots);
+ rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+ skb = xennet_get_rx_skb(queue, cons + slots);
+ ref = xennet_get_rx_ref(queue, cons + slots);
slots++;
}
@@ -802,7 +863,7 @@ next:
}
if (unlikely(err))
- np->rx.rsp_cons = cons + slots;
+ queue->rx.rsp_cons = cons + slots;
return err;
}
@@ -836,17 +897,17 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return 0;
}
-static RING_IDX xennet_fill_frags(struct netfront_info *np,
+static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
- RING_IDX cons = np->rx.rsp_cons;
+ RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
while ((nskb = __skb_dequeue(list))) {
struct xen_netif_rx_response *rx =
- RING_GET_RESPONSE(&np->rx, ++cons);
+ RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
@@ -879,7 +940,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
struct netfront_info *np = netdev_priv(dev);
- np->rx_gso_checksum_fixup++;
+ atomic_inc(&np->rx_gso_checksum_fixup);
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
@@ -891,11 +952,10 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
return skb_checksum_setup(skb, recalculate_partial_csum);
}
-static int handle_incoming_queue(struct net_device *dev,
+static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq)
{
- struct netfront_info *np = netdev_priv(dev);
- struct netfront_stats *stats = this_cpu_ptr(np->stats);
+ struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -906,13 +966,13 @@ static int handle_incoming_queue(struct net_device *dev,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
/* Ethernet work: Delayed to here as it peeks the header. */
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, queue->info->netdev);
skb_reset_network_header(skb);
- if (checksum_setup(dev, skb)) {
+ if (checksum_setup(queue->info->netdev, skb)) {
kfree_skb(skb);
packets_dropped++;
- dev->stats.rx_errors++;
+ queue->info->netdev->stats.rx_errors++;
continue;
}
@@ -922,7 +982,7 @@ static int handle_incoming_queue(struct net_device *dev,
u64_stats_update_end(&stats->syncp);
/* Pass it up. */
- napi_gro_receive(&np->napi, skb);
+ napi_gro_receive(&queue->napi, skb);
}
return packets_dropped;
@@ -930,8 +990,8 @@ static int handle_incoming_queue(struct net_device *dev,
static int xennet_poll(struct napi_struct *napi, int budget)
{
- struct netfront_info *np = container_of(napi, struct netfront_info, napi);
- struct net_device *dev = np->netdev;
+ struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
+ struct net_device *dev = queue->info->netdev;
struct sk_buff *skb;
struct netfront_rx_info rinfo;
struct xen_netif_rx_response *rx = &rinfo.rx;
@@ -944,29 +1004,29 @@ static int xennet_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int err;
- spin_lock(&np->rx_lock);
+ spin_lock(&queue->rx_lock);
skb_queue_head_init(&rxq);
skb_queue_head_init(&errq);
skb_queue_head_init(&tmpq);
- rp = np->rx.sring->rsp_prod;
+ rp = queue->rx.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- i = np->rx.rsp_cons;
+ i = queue->rx.rsp_cons;
work_done = 0;
while ((i != rp) && (work_done < budget)) {
- memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
memset(extras, 0, sizeof(rinfo.extras));
- err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+ err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
if (unlikely(err)) {
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
dev->stats.rx_errors++;
- i = np->rx.rsp_cons;
+ i = queue->rx.rsp_cons;
continue;
}
@@ -978,7 +1038,7 @@ err:
if (unlikely(xennet_set_skb_gso(skb, gso))) {
__skb_queue_head(&tmpq, skb);
- np->rx.rsp_cons += skb_queue_len(&tmpq);
+ queue->rx.rsp_cons += skb_queue_len(&tmpq);
goto err;
}
}
@@ -992,7 +1052,7 @@ err:
skb->data_len = rx->status;
skb->len += rx->status;
- i = xennet_fill_frags(np, skb, &tmpq);
+ i = xennet_fill_frags(queue, skb, &tmpq);
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1001,22 +1061,22 @@ err:
__skb_queue_tail(&rxq, skb);
- np->rx.rsp_cons = ++i;
+ queue->rx.rsp_cons = ++i;
work_done++;
}
__skb_queue_purge(&errq);
- work_done -= handle_incoming_queue(dev, &rxq);
+ work_done -= handle_incoming_queue(queue, &rxq);
/* If we get a callback with very few responses, reduce fill target. */
/* NB. Note exponential increase, linear decrease. */
- if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
- ((3*np->rx_target) / 4)) &&
- (--np->rx_target < np->rx_min_target))
- np->rx_target = np->rx_min_target;
+ if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
+ ((3*queue->rx_target) / 4)) &&
+ (--queue->rx_target < queue->rx_min_target))
+ queue->rx_target = queue->rx_min_target;
- xennet_alloc_rx_buffers(dev);
+ xennet_alloc_rx_buffers(queue);
if (work_done < budget) {
int more_to_do = 0;
@@ -1025,14 +1085,14 @@ err:
local_irq_save(flags);
- RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+ RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
if (!more_to_do)
__napi_complete(napi);
local_irq_restore(flags);
}
- spin_unlock(&np->rx_lock);
+ spin_unlock(&queue->rx_lock);
return work_done;
}
@@ -1080,43 +1140,43 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
return tot;
}
-static void xennet_release_tx_bufs(struct netfront_info *np)
+static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
struct sk_buff *skb;
int i;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
/* Skip over entries which are actually freelist references */
- if (skb_entry_is_link(&np->tx_skbs[i]))
+ if (skb_entry_is_link(&queue->tx_skbs[i]))
continue;
- skb = np->tx_skbs[i].skb;
- get_page(np->grant_tx_page[i]);
- gnttab_end_foreign_access(np->grant_tx_ref[i],
+ skb = queue->tx_skbs[i].skb;
+ get_page(queue->grant_tx_page[i]);
+ gnttab_end_foreign_access(queue->grant_tx_ref[i],
GNTMAP_readonly,
- (unsigned long)page_address(np->grant_tx_page[i]));
- np->grant_tx_page[i] = NULL;
- np->grant_tx_ref[i] = GRANT_INVALID_REF;
- add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+ (unsigned long)page_address(queue->grant_tx_page[i]));
+ queue->grant_tx_page[i] = NULL;
+ queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
dev_kfree_skb_irq(skb);
}
}
-static void xennet_release_rx_bufs(struct netfront_info *np)
+static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
int id, ref;
- spin_lock_bh(&np->rx_lock);
+ spin_lock_bh(&queue->rx_lock);
for (id = 0; id < NET_RX_RING_SIZE; id++) {
struct sk_buff *skb;
struct page *page;
- skb = np->rx_skbs[id];
+ skb = queue->rx_skbs[id];
if (!skb)
continue;
- ref = np->grant_rx_ref[id];
+ ref = queue->grant_rx_ref[id];
if (ref == GRANT_INVALID_REF)
continue;
@@ -1128,21 +1188,28 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
get_page(page);
gnttab_end_foreign_access(ref, 0,
(unsigned long)page_address(page));
- np->grant_rx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[id] = GRANT_INVALID_REF;
kfree_skb(skb);
}
- spin_unlock_bh(&np->rx_lock);
+ spin_unlock_bh(&queue->rx_lock);
}
static void xennet_uninit(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
- xennet_release_tx_bufs(np);
- xennet_release_rx_bufs(np);
- gnttab_free_grant_references(np->gref_tx_head);
- gnttab_free_grant_references(np->gref_rx_head);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ struct netfront_queue *queue;
+ unsigned int i;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ xennet_release_tx_bufs(queue);
+ xennet_release_rx_bufs(queue);
+ gnttab_free_grant_references(queue->gref_tx_head);
+ gnttab_free_grant_references(queue->gref_rx_head);
+ }
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
@@ -1203,25 +1270,24 @@ static int xennet_set_features(struct net_device *dev,
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
- struct netfront_info *np = dev_id;
- struct net_device *dev = np->netdev;
+ struct netfront_queue *queue = dev_id;
unsigned long flags;
- spin_lock_irqsave(&np->tx_lock, flags);
- xennet_tx_buf_gc(dev);
- spin_unlock_irqrestore(&np->tx_lock, flags);
+ spin_lock_irqsave(&queue->tx_lock, flags);
+ xennet_tx_buf_gc(queue);
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
- struct netfront_info *np = dev_id;
- struct net_device *dev = np->netdev;
+ struct netfront_queue *queue = dev_id;
+ struct net_device *dev = queue->info->netdev;
if (likely(netif_carrier_ok(dev) &&
- RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
- napi_schedule(&np->napi);
+ RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+ napi_schedule(&queue->napi);
return IRQ_HANDLED;
}
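
Both split handlers now take a struct netfront_queue as dev_id. The combined handler used when tx and rx share one event channel is untouched by this hunk and its body is not shown in this excerpt; presumably it remains a thin wrapper along these lines, which is why xennet_poll_controller() below can simply invoke it once per queue:

    /* Sketch of the combined handler; dev_id is now the queue. */
    static irqreturn_t xennet_interrupt(int irq, void *dev_id)
    {
            xennet_tx_interrupt(irq, dev_id);
            xennet_rx_interrupt(irq, dev_id);
            return IRQ_HANDLED;
    }
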
@@ -1236,7 +1302,12 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
- xennet_interrupt(0, dev);
+ /* Poll each queue */
+ struct netfront_info *info = netdev_priv(dev);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i;
+ for (i = 0; i < num_queues; ++i)
+ xennet_interrupt(0, &info->queues[i]);
}
#endif
@@ -1251,6 +1322,7 @@ static const struct net_device_ops xennet_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = xennet_fix_features,
.ndo_set_features = xennet_set_features,
+ .ndo_select_queue = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xennet_poll_controller,
#endif
@@ -1258,66 +1330,30 @@ static const struct net_device_ops xennet_netdev_ops = {
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
- int i, err;
+ int err;
struct net_device *netdev;
struct netfront_info *np;
- netdev = alloc_etherdev(sizeof(struct netfront_info));
+ netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
if (!netdev)
return ERR_PTR(-ENOMEM);
np = netdev_priv(netdev);
np->xbdev = dev;
- spin_lock_init(&np->tx_lock);
- spin_lock_init(&np->rx_lock);
-
- skb_queue_head_init(&np->rx_batch);
- np->rx_target = RX_DFL_MIN_TARGET;
- np->rx_min_target = RX_DFL_MIN_TARGET;
- np->rx_max_target = RX_MAX_TARGET;
-
- init_timer(&np->rx_refill_timer);
- np->rx_refill_timer.data = (unsigned long)netdev;
- np->rx_refill_timer.function = rx_refill_timeout;
+ /* No need to use rtnl_lock() before the call below as it
+ * happens before register_netdev().
+ */
+ netif_set_real_num_tx_queues(netdev, 0);
+ np->queues = NULL;
err = -ENOMEM;
np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL)
goto exit;
- /* Initialise tx_skbs as a free chain containing every entry. */
- np->tx_skb_freelist = 0;
- for (i = 0; i < NET_TX_RING_SIZE; i++) {
- skb_entry_set_link(&np->tx_skbs[i], i+1);
- np->grant_tx_ref[i] = GRANT_INVALID_REF;
- np->grant_tx_page[i] = NULL;
- }
-
- /* Clear out rx_skbs */
- for (i = 0; i < NET_RX_RING_SIZE; i++) {
- np->rx_skbs[i] = NULL;
- np->grant_rx_ref[i] = GRANT_INVALID_REF;
- }
-
- /* A grant for every tx ring slot */
- if (gnttab_alloc_grant_references(TX_MAX_TARGET,
- &np->gref_tx_head) < 0) {
- pr_alert("can't alloc tx grant refs\n");
- err = -ENOMEM;
- goto exit_free_stats;
- }
- /* A grant for every rx ring slot */
- if (gnttab_alloc_grant_references(RX_MAX_TARGET,
- &np->gref_rx_head) < 0) {
- pr_alert("can't alloc rx grant refs\n");
- err = -ENOMEM;
- goto exit_free_tx;
- }
-
netdev->netdev_ops = &xennet_netdev_ops;
- netif_napi_add(netdev, &np->napi, xennet_poll, 64);
netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_GSO_ROBUST;
netdev->hw_features = NETIF_F_SG |
@@ -1332,7 +1368,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
*/
netdev->features |= netdev->hw_features;
- SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ netdev->ethtool_ops = &xennet_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
@@ -1343,10 +1379,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev;
- exit_free_tx:
- gnttab_free_grant_references(np->gref_tx_head);
- exit_free_stats:
- free_percpu(np->stats);
exit:
free_netdev(netdev);
return ERR_PTR(err);
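
Note that netif_napi_add() and the grant/freelist setup have left xennet_create_dev() entirely: those resources are per-queue now (struct netfront_queue above carries its own napi, grant heads and skb arrays), so their initialisation moves into the per-queue setup path, which falls outside this excerpt. Presumably that path performs the per-queue analogue of the deleted line, e.g.:

    /* Assumed to live in the queue-setup code, once per queue: */
    netif_napi_add(netdev, &queue->napi, xennet_poll, 64);
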
@@ -1404,30 +1436,37 @@ static void xennet_end_access(int ref, void *page)
static void xennet_disconnect_backend(struct netfront_info *info)
{
- /* Stop old i/f to prevent errors whilst we rebuild the state. */
- spin_lock_bh(&info->rx_lock);
- spin_lock_irq(&info->tx_lock);
- netif_carrier_off(info->netdev);
- spin_unlock_irq(&info->tx_lock);
- spin_unlock_bh(&info->rx_lock);
-
- if (info->tx_irq && (info->tx_irq == info->rx_irq))
- unbind_from_irqhandler(info->tx_irq, info);
- if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
- unbind_from_irqhandler(info->tx_irq, info);
- unbind_from_irqhandler(info->rx_irq, info);
- }
- info->tx_evtchn = info->rx_evtchn = 0;
- info->tx_irq = info->rx_irq = 0;
+ unsigned int i = 0;
+ unsigned int num_queues = info->netdev->real_num_tx_queues;
+
+ for (i = 0; i < num_queues; ++i) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_bh(&queue->rx_lock);
+ spin_lock_irq(&queue->tx_lock);
+ netif_carrier_off(queue->info->netdev);
+ spin_unlock_irq(&queue->tx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+
+ if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ }
+ queue->tx_evtchn = queue->rx_evtchn = 0;
+ queue->tx_irq = queue->rx_irq = 0;
- /* End access and free the pages */
- xennet_end_access(info->tx_ring_ref, info->tx.sring);
- xennet_end_access(info->rx_ring_ref, info->rx.sring);
+ /* End access and free the pages */
+ xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
+ xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
- info->tx_ring_ref = GRANT_INVALID_REF;
- info->rx_ring_ref = GRANT_INVALID_REF;
- info->tx.sring = NULL;
- info->rx.sring = NULL;
+ queue->tx_ring_ref = GRANT_INVALID_REF;
+ queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx.sring = NULL;
+ queue->rx.sring = NULL;
+ }
}
/**
@@ -1468,100 +1507,86 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
return 0;
}
-static int setup_netfront_single(struct netfront_info *info)
+static int setup_netfront_single(struct netfront_queue *queue)
{
int err;
- err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+ err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
if (err < 0)
goto fail;
- err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+ err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
xennet_interrupt,
- 0, info->netdev->name, info);
+ 0, queue->info->netdev->name, queue);
if (err < 0)
goto bind_fail;
- info->rx_evtchn = info->tx_evtchn;
- info->rx_irq = info->tx_irq = err;
+ queue->rx_evtchn = queue->tx_evtchn;
+ queue->rx_irq = queue->tx_irq = err;
return 0;
bind_fail:
- xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
- info->tx_evtchn = 0;
+ xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+ queue->tx_evtchn = 0;
fail:
return err;
}
-static int setup_netfront_split(struct netfront_info *info)
+static int setup_netfront_split(struct netfront_queue *queue)
{
int err;
- err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+ err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
if (err < 0)
goto fail;
- err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+ err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
if (err < 0)
goto alloc_rx_evtchn_fail;
- snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
- "%s-tx", info->netdev->name);
- err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+ "%s-tx", queue->name);
+ err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
xennet_tx_interrupt,
- 0, info->tx_irq_name, info);
+ 0, queue->tx_irq_name, queue);
if (err < 0)
goto bind_tx_fail;
- info->tx_irq = err;
+ queue->tx_irq = err;
- snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
- "%s-rx", info->netdev->name);
- err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+ snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+ "%s-rx", queue->name);
+ err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
xennet_rx_interrupt,
- 0, info->rx_irq_name, info);
+ 0, queue->rx_irq_name, queue);
if (err < 0)
goto bind_rx_fail;
- info->rx_irq = err;
+ queue->rx_irq = err;
return 0;
bind_rx_fail:
- unbind_from_irqhandler(info->tx_irq, info);
- info->tx_irq = 0;
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ queue->tx_irq = 0;
bind_tx_fail:
- xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
- info->rx_evtchn = 0;
+ xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
+ queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
- xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
- info->tx_evtchn = 0;
+ xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+ queue->tx_evtchn = 0;
fail:
return err;
}
-static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+static int setup_netfront(struct xenbus_device *dev,
+ struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err;
- struct net_device *netdev = info->netdev;
- unsigned int feature_split_evtchn;
-
- info->tx_ring_ref = GRANT_INVALID_REF;
- info->rx_ring_ref = GRANT_INVALID_REF;
- info->rx.sring = NULL;
- info->tx.sring = NULL;
- netdev->irq = 0;
-
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-split-event-channels", "%u",
- &feature_split_evtchn);
- if (err < 0)
- feature_split_evtchn = 0;
- err = xen_net_read_mac(dev, netdev->dev_addr);
- if (err) {
- xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
- goto fail;
- }
+ queue->tx_ring_ref = GRANT_INVALID_REF;
+ queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->rx.sring = NULL;
+ queue->tx.sring = NULL;
txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!txs) {
@@ -1570,13 +1595,13 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
goto fail;
}
SHARED_RING_INIT(txs);
- FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+ FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
err = xenbus_grant_ring(dev, virt_to_mfn(txs));
if (err < 0)
goto grant_tx_ring_fail;
- info->tx_ring_ref = err;
+ queue->tx_ring_ref = err;
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
err = -ENOMEM;
@@ -1584,21 +1609,21 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
goto alloc_rx_ring_fail;
}
SHARED_RING_INIT(rxs);
- FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+ FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
if (err < 0)
goto grant_rx_ring_fail;
- info->rx_ring_ref = err;
+ queue->rx_ring_ref = err;
if (feature_split_evtchn)
- err = setup_netfront_split(info);
+ err = setup_netfront_split(queue);
/* setup single event channel if
* a) feature-split-event-channels == 0
* b) feature-split-event-channels == 1 but failed to setup
*/
if (!feature_split_evtchn || (feature_split_evtchn && err))
- err = setup_netfront_single(info);
+ err = setup_netfront_single(queue);
if (err)
goto alloc_evtchn_fail;
@@ -1609,17 +1634,225 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
* granted pages because backend is not accessing it at this point.
*/
alloc_evtchn_fail:
- gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
+ gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
free_page((unsigned long)rxs);
alloc_rx_ring_fail:
- gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
+ gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
free_page((unsigned long)txs);
fail:
return err;
}
+/* Queue-specific initialisation
+ * This used to be done in xennet_create_dev() but must now
+ * be run per-queue.
+ */
+static int xennet_init_queue(struct netfront_queue *queue)
+{
+ unsigned short i;
+ int err = 0;
+
+ spin_lock_init(&queue->tx_lock);
+ spin_lock_init(&queue->rx_lock);
+
+ skb_queue_head_init(&queue->rx_batch);
+ queue->rx_target = RX_DFL_MIN_TARGET;
+ queue->rx_min_target = RX_DFL_MIN_TARGET;
+ queue->rx_max_target = RX_MAX_TARGET;
+
+ init_timer(&queue->rx_refill_timer);
+ queue->rx_refill_timer.data = (unsigned long)queue;
+ queue->rx_refill_timer.function = rx_refill_timeout;
+
+ snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+ queue->info->netdev->name, queue->id);
+
+ /* Initialise tx_skbs as a free chain containing every entry. */
+ queue->tx_skb_freelist = 0;
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ skb_entry_set_link(&queue->tx_skbs[i], i+1);
+ queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_page[i] = NULL;
+ }
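+ /* Each entry links to the next, so allocation pops from
+ * tx_skb_freelist and walks 0 -> 1 -> ... through the chain
+ * until it is exhausted. */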
+
+ /* Clear out rx_skbs */
+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
+ queue->rx_skbs[i] = NULL;
+ queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* A grant for every tx ring slot */
+ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ &queue->gref_tx_head) < 0) {
+ pr_alert("can't alloc tx grant refs\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* A grant for every rx ring slot */
+ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ &queue->gref_rx_head) < 0) {
+ pr_alert("can't alloc rx grant refs\n");
+ err = -ENOMEM;
+ goto exit_free_tx;
+ }
+
+ return 0;
+
+ exit_free_tx:
+ gnttab_free_grant_references(queue->gref_tx_head);
+ exit:
+ return err;
+}
+
+static int write_queue_xenstore_keys(struct netfront_queue *queue,
+ struct xenbus_transaction *xbt, int write_hierarchical)
+{
+ /* Write the queue-specific keys into XenStore in the traditional
+ * way for a single queue, or under per-queue subkeys for multiple
+ * queues.
+ */
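+ /* For illustration, the hierarchical (multi-queue) layout built
+ * below looks like this, with <nodename> standing for the
+ * frontend's XenStore node:
+ *   <nodename>/queue-0/tx-ring-ref
+ *   <nodename>/queue-0/rx-ring-ref
+ *   <nodename>/queue-0/event-channel-tx
+ *   <nodename>/queue-0/event-channel-rx
+ */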
+ struct xenbus_device *dev = queue->info->xbdev;
+ int err;
+ const char *message;
+ char *path;
+ size_t pathsize;
+
+ /* Choose the correct place to write the keys */
+ if (write_hierarchical) {
+ pathsize = strlen(dev->nodename) + 10;
+ path = kzalloc(pathsize, GFP_KERNEL);
+ if (!path) {
+ err = -ENOMEM;
+ message = "out of memory while writing ring references";
+ goto error;
+ }
+ snprintf(path, pathsize, "%s/queue-%u",
+ dev->nodename, queue->id);
+ } else {
+ path = (char *)dev->nodename;
+ }
+
+ /* Write ring references */
+ err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
+ queue->tx_ring_ref);
+ if (err) {
+ message = "writing tx-ring-ref";
+ goto error;
+ }
+
+ err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
+ queue->rx_ring_ref);
+ if (err) {
+ message = "writing rx-ring-ref";
+ goto error;
+ }
+
+ /* Write event channels; taking into account both shared
+ * and split event channel scenarios.
+ */
+ if (queue->tx_evtchn == queue->rx_evtchn) {
+ /* Shared event channel */
+ err = xenbus_printf(*xbt, path,
+ "event-channel", "%u", queue->tx_evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto error;
+ }
+ } else {
+ /* Split event channels */
+ err = xenbus_printf(*xbt, path,
+ "event-channel-tx", "%u", queue->tx_evtchn);
+ if (err) {
+ message = "writing event-channel-tx";
+ goto error;
+ }
+
+ err = xenbus_printf(*xbt, path,
+ "event-channel-rx", "%u", queue->rx_evtchn);
+ if (err) {
+ message = "writing event-channel-rx";
+ goto error;
+ }
+ }
+
+ if (write_hierarchical)
+ kfree(path);
+ return 0;
+
+error:
+ if (write_hierarchical)
+ kfree(path);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ return err;
+}
+
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+ unsigned int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+ netif_napi_del(&queue->napi);
+ }
+
+ rtnl_unlock();
+
+ kfree(info->queues);
+ info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+ unsigned int num_queues)
+{
+ unsigned int i;
+ int ret;
+
+ info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+ GFP_KERNEL);
+ if (!info->queues)
+ return -ENOMEM;
+
+ rtnl_lock();
+
+ for (i = 0; i < num_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ queue->id = i;
+ queue->info = info;
+
+ ret = xennet_init_queue(queue);
+ if (ret < 0) {
+ dev_warn(&info->netdev->dev, "only created %d queues\n",
+ i);
+ num_queues = i;
+ break;
+ }
+
+ netif_napi_add(queue->info->netdev, &queue->napi,
+ xennet_poll, 64);
+ if (netif_running(info->netdev))
+ napi_enable(&queue->napi);
+ }
+
+ netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+ rtnl_unlock();
+
+ if (num_queues == 0) {
+ dev_err(&info->netdev->dev, "no queues\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
struct netfront_info *info)
@@ -1627,11 +1860,61 @@ static int talk_to_netback(struct xenbus_device *dev,
const char *message;
struct xenbus_transaction xbt;
int err;
+ unsigned int feature_split_evtchn;
+ unsigned int i = 0;
+ unsigned int max_queues = 0;
+ struct netfront_queue *queue = NULL;
+ unsigned int num_queues = 1;
- /* Create shared ring, alloc event channel. */
- err = setup_netfront(dev, info);
- if (err)
+ info->netdev->irq = 0;
+
+ /* Check if backend supports multiple queues */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "multi-queue-max-queues", "%u", &max_queues);
+ if (err < 0)
+ max_queues = 1;
+ num_queues = min(max_queues, xennet_max_queues);
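+ /* For example (assuming the default set in netif_init() of
+ * xennet_max_queues == num_online_cpus()): a backend advertising
+ * multi-queue-max-queues = 8 on a 4-CPU guest gets 4 queues. */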
+
+ /* Check feature-split-event-channels */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-split-event-channels", "%u",
+ &feature_split_evtchn);
+ if (err < 0)
+ feature_split_evtchn = 0;
+
+ /* Read mac addr. */
+ err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
goto out;
+ }
+
+ if (info->queues)
+ xennet_destroy_queues(info);
+
+ err = xennet_create_queues(info, num_queues);
+ if (err < 0)
+ goto destroy_ring;
+
+ /* Create shared ring, alloc event channel -- for each queue */
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+ err = setup_netfront(dev, queue, feature_split_evtchn);
+ if (err) {
+ /* setup_netfront() will tidy up the current
+ * queue on error, but we need to clean up
+ * those already allocated.
+ */
+ if (i > 0) {
+ rtnl_lock();
+ netif_set_real_num_tx_queues(info->netdev, i);
+ rtnl_unlock();
+ goto destroy_ring;
+ } else {
+ goto out;
+ }
+ }
+ }
again:
err = xenbus_transaction_start(&xbt);
@@ -1640,41 +1923,29 @@ again:
goto destroy_ring;
}
- err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
- info->tx_ring_ref);
- if (err) {
- message = "writing tx ring-ref";
- goto abort_transaction;
- }
- err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
- info->rx_ring_ref);
- if (err) {
- message = "writing rx ring-ref";
- goto abort_transaction;
- }
-
- if (info->tx_evtchn == info->rx_evtchn) {
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel", "%u", info->tx_evtchn);
- if (err) {
- message = "writing event-channel";
- goto abort_transaction;
- }
+ if (num_queues == 1) {
+ err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
+ if (err)
+ goto abort_transaction_no_dev_fatal;
} else {
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel-tx", "%u", info->tx_evtchn);
+ /* Write the number of queues */
+ err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
+ "%u", num_queues);
if (err) {
- message = "writing event-channel-tx";
- goto abort_transaction;
+ message = "writing multi-queue-num-queues";
+ goto abort_transaction_no_dev_fatal;
}
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel-rx", "%u", info->rx_evtchn);
- if (err) {
- message = "writing event-channel-rx";
- goto abort_transaction;
+
+ /* Write the keys for each queue */
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+ err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
+ if (err)
+ goto abort_transaction_no_dev_fatal;
}
}
+ /* The remaining keys are not queue-specific */
err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1);
if (err) {
@@ -1724,10 +1995,16 @@ again:
return 0;
abort_transaction:
- xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, err, "%s", message);
+abort_transaction_no_dev_fatal:
+ xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
+ kfree(info->queues);
+ info->queues = NULL;
+ rtnl_lock();
+ netif_set_real_num_tx_queues(info->netdev, 0);
+ rtnl_unlock();
out:
return err;
}
@@ -1735,11 +2012,14 @@ again:
static int xennet_connect(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
+ unsigned int num_queues = 0;
int i, requeue_idx, err;
struct sk_buff *skb;
grant_ref_t ref;
struct xen_netif_rx_request *req;
unsigned int feature_rx_copy;
+ unsigned int j = 0;
+ struct netfront_queue *queue = NULL;
err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
"feature-rx-copy", "%u", &feature_rx_copy);
@@ -1756,40 +2036,47 @@ static int xennet_connect(struct net_device *dev)
if (err)
return err;
+ /* talk_to_netback() sets the correct number of queues */
+ num_queues = dev->real_num_tx_queues;
+
rtnl_lock();
netdev_update_features(dev);
rtnl_unlock();
- spin_lock_bh(&np->rx_lock);
- spin_lock_irq(&np->tx_lock);
+ /* By now, the queue structures have been set up */
+ for (j = 0; j < num_queues; ++j) {
+ queue = &np->queues[j];
+ spin_lock_bh(&queue->rx_lock);
+ spin_lock_irq(&queue->tx_lock);
- /* Step 1: Discard all pending TX packet fragments. */
- xennet_release_tx_bufs(np);
+ /* Step 1: Discard all pending TX packet fragments. */
+ xennet_release_tx_bufs(queue);
- /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
- for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
- skb_frag_t *frag;
- const struct page *page;
- if (!np->rx_skbs[i])
- continue;
+ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ skb_frag_t *frag;
+ const struct page *page;
+ if (!queue->rx_skbs[i])
+ continue;
- skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
- ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
- req = RING_GET_REQUEST(&np->rx, requeue_idx);
+ skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
+ ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
+ req = RING_GET_REQUEST(&queue->rx, requeue_idx);
- frag = &skb_shinfo(skb)->frags[0];
- page = skb_frag_page(frag);
- gnttab_grant_foreign_access_ref(
- ref, np->xbdev->otherend_id,
- pfn_to_mfn(page_to_pfn(page)),
- 0);
- req->gref = ref;
- req->id = requeue_idx;
+ frag = &skb_shinfo(skb)->frags[0];
+ page = skb_frag_page(frag);
+ gnttab_grant_foreign_access_ref(
+ ref, queue->info->xbdev->otherend_id,
+ pfn_to_mfn(page_to_pfn(page)),
+ 0);
+ req->gref = ref;
+ req->id = requeue_idx;
- requeue_idx++;
- }
+ requeue_idx++;
+ }
- np->rx.req_prod_pvt = requeue_idx;
+ queue->rx.req_prod_pvt = requeue_idx;
+ }
/*
* Step 3: All public and private state should now be sane. Get
@@ -1798,14 +2085,17 @@ static int xennet_connect(struct net_device *dev)
* packets.
*/
netif_carrier_on(np->netdev);
- notify_remote_via_irq(np->tx_irq);
- if (np->tx_irq != np->rx_irq)
- notify_remote_via_irq(np->rx_irq);
- xennet_tx_buf_gc(dev);
- xennet_alloc_rx_buffers(dev);
-
- spin_unlock_irq(&np->tx_lock);
- spin_unlock_bh(&np->rx_lock);
+ for (j = 0; j < num_queues; ++j) {
+ queue = &np->queues[j];
+ notify_remote_via_irq(queue->tx_irq);
+ if (queue->tx_irq != queue->rx_irq)
+ notify_remote_via_irq(queue->rx_irq);
+ xennet_tx_buf_gc(queue);
+ xennet_alloc_rx_buffers(queue);
+
+ spin_unlock_irq(&queue->tx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+ }
return 0;
}
@@ -1878,7 +2168,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
int i;
for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
- data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
+ data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -1909,8 +2199,12 @@ static ssize_t show_rxbuf_min(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *info = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
- return sprintf(buf, "%u\n", info->rx_min_target);
+ if (num_queues)
+ return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
+ else
+ return sprintf(buf, "%u\n", RX_MIN_TARGET);
}
static ssize_t store_rxbuf_min(struct device *dev,
@@ -1919,8 +2213,11 @@ static ssize_t store_rxbuf_min(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *np = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
char *endp;
unsigned long target;
+ unsigned int i;
+ struct netfront_queue *queue;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1934,16 +2231,19 @@ static ssize_t store_rxbuf_min(struct device *dev,
if (target > RX_MAX_TARGET)
target = RX_MAX_TARGET;
- spin_lock_bh(&np->rx_lock);
- if (target > np->rx_max_target)
- np->rx_max_target = target;
- np->rx_min_target = target;
- if (target > np->rx_target)
- np->rx_target = target;
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ spin_lock_bh(&queue->rx_lock);
+ if (target > queue->rx_max_target)
+ queue->rx_max_target = target;
+ queue->rx_min_target = target;
+ if (target > queue->rx_target)
+ queue->rx_target = target;
- xennet_alloc_rx_buffers(netdev);
+ xennet_alloc_rx_buffers(queue);
- spin_unlock_bh(&np->rx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+ }
return len;
}
@@ -1952,8 +2252,12 @@ static ssize_t show_rxbuf_max(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *info = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
- return sprintf(buf, "%u\n", info->rx_max_target);
+ if (num_queues)
+ return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
+ else
+ return sprintf(buf, "%u\n", RX_MAX_TARGET);
}
static ssize_t store_rxbuf_max(struct device *dev,
@@ -1962,8 +2266,11 @@ static ssize_t store_rxbuf_max(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *np = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
char *endp;
unsigned long target;
+ unsigned int i = 0;
+ struct netfront_queue *queue = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1977,16 +2284,19 @@ static ssize_t store_rxbuf_max(struct device *dev,
if (target > RX_MAX_TARGET)
target = RX_MAX_TARGET;
- spin_lock_bh(&np->rx_lock);
- if (target < np->rx_min_target)
- np->rx_min_target = target;
- np->rx_max_target = target;
- if (target < np->rx_target)
- np->rx_target = target;
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ spin_lock_bh(&queue->rx_lock);
+ if (target < queue->rx_min_target)
+ queue->rx_min_target = target;
+ queue->rx_max_target = target;
+ if (target < queue->rx_target)
+ queue->rx_target = target;
- xennet_alloc_rx_buffers(netdev);
+ xennet_alloc_rx_buffers(queue);
- spin_unlock_bh(&np->rx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+ }
return len;
}
@@ -1995,8 +2305,12 @@ static ssize_t show_rxbuf_cur(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *info = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
- return sprintf(buf, "%u\n", info->rx_target);
+ if (num_queues)
+ return sprintf(buf, "%u\n", info->queues[0].rx_target);
+ else
+ return sprintf(buf, "0\n");
}
static struct device_attribute xennet_attrs[] = {
@@ -2043,6 +2357,9 @@ static const struct xenbus_device_id netfront_ids[] = {
static int xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
+ unsigned int num_queues = info->netdev->real_num_tx_queues;
+ struct netfront_queue *queue = NULL;
+ unsigned int i = 0;
dev_dbg(&dev->dev, "%s\n", dev->nodename);
@@ -2052,7 +2369,15 @@ static int xennet_remove(struct xenbus_device *dev)
unregister_netdev(info->netdev);
- del_timer_sync(&info->rx_refill_timer);
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+ del_timer_sync(&queue->rx_refill_timer);
+ }
+
+ if (num_queues) {
+ kfree(info->queues);
+ info->queues = NULL;
+ }
free_percpu(info->stats);
@@ -2078,6 +2403,9 @@ static int __init netif_init(void)
pr_info("Initialising Xen virtual ethernet driver\n");
+ /* Allow as many queues as there are CPUs, by default */
+ xennet_max_queues = num_online_cpus();
+
return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 65d4ca19d132..26c66a126551 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -71,5 +71,6 @@ config NFC_PORT100
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
source "drivers/nfc/nfcmrvl/Kconfig"
+source "drivers/nfc/st21nfca/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index ae42a3fa60c9..23225b0287fd 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_NFC_SIM) += nfcsim.o
obj-$(CONFIG_NFC_PORT100) += port100.o
obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
+obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index f2acd85be86e..440291ab7263 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -22,6 +22,8 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -857,6 +859,92 @@ exit_state_wait_secure_write_answer:
}
}
+#ifdef CONFIG_OF
+
+static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+ struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
+ struct device_node *pp;
+ int ret;
+
+ pp = client->dev.of_node;
+ if (!pp) {
+ ret = -ENODEV;
+ goto err_dt;
+ }
+
+ /* Obtain the EN GPIO from the device tree */
+ ret = of_get_named_gpio(pp, "enable-gpios", 0);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ nfc_err(&client->dev,
+ "Failed to get EN gpio, error: %d\n", ret);
+ goto err_dt;
+ }
+ phy->gpio_en = ret;
+
+ /* Configuration of EN GPIO */
+ ret = gpio_request(phy->gpio_en, "pn544_en");
+ if (ret) {
+ nfc_err(&client->dev, "Fail EN pin\n");
+ goto err_dt;
+ }
+ ret = gpio_direction_output(phy->gpio_en, 0);
+ if (ret) {
+ nfc_err(&client->dev, "Fail EN pin direction\n");
+ goto err_gpio_en;
+ }
+
+ /* Obtain the FW GPIO from the device tree */
+ ret = of_get_named_gpio(pp, "firmware-gpios", 0);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ nfc_err(&client->dev,
+ "Failed to get FW gpio, error: %d\n", ret);
+ goto err_gpio_en;
+ }
+ phy->gpio_fw = ret;
+
+ /* Configuration of FW GPIO */
+ ret = gpio_request(phy->gpio_fw, "pn544_fw");
+ if (ret) {
+ nfc_err(&client->dev, "Fail FW pin\n");
+ goto err_gpio_en;
+ }
+ ret = gpio_direction_output(phy->gpio_fw, 0);
+ if (ret) {
+ nfc_err(&client->dev, "Fail FW pin direction\n");
+ goto err_gpio_fw;
+ }
+
+ /* IRQ */
+ ret = irq_of_parse_and_map(pp, 0);
+ if (ret < 0) {
+ nfc_err(&client->dev,
+ "Unable to get irq, error: %d\n", ret);
+ goto err_gpio_fw;
+ }
+ client->irq = ret;
+
+ return 0;
+
+err_gpio_fw:
+ gpio_free(phy->gpio_fw);
+err_gpio_en:
+ gpio_free(phy->gpio_en);
+err_dt:
+ return ret;
+}
+
+#else
+
+static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+ return -ENODEV;
+}
+
+#endif
+
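+/*
+ * Illustrative device tree node for this binding. The compatible
+ * string and GPIO property names come from this file; the address,
+ * interrupt specifier and GPIO numbers are placeholders only:
+ *
+ *	nfc@28 {
+ *		compatible = "nxp,pn544-i2c";
+ *		reg = <0x28>;
+ *		interrupt-parent = <&gpio1>;
+ *		interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ *		enable-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ *		firmware-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+ *	};
+ */
+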
static int pn544_hci_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -887,25 +975,36 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, phy);
pdata = client->dev.platform_data;
- if (pdata == NULL) {
- nfc_err(&client->dev, "No platform data\n");
- return -EINVAL;
- }
- if (pdata->request_resources == NULL) {
- nfc_err(&client->dev, "request_resources() missing\n");
- return -EINVAL;
- }
+ /* No platform data, using device tree. */
+ if (!pdata && client->dev.of_node) {
+ r = pn544_hci_i2c_of_request_resources(client);
+ if (r) {
+ nfc_err(&client->dev, "No DT data\n");
+ return r;
+ }
+ /* Using platform data. */
+ } else if (pdata) {
- r = pdata->request_resources(client);
- if (r) {
- nfc_err(&client->dev, "Cannot get platform resources\n");
- return r;
- }
+ if (pdata->request_resources == NULL) {
+ nfc_err(&client->dev, "request_resources() missing\n");
+ return -EINVAL;
+ }
- phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
- phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
- phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+ r = pdata->request_resources(client);
+ if (r) {
+ nfc_err(&client->dev,
+ "Cannot get platform resources\n");
+ return r;
+ }
+
+ phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
+ phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
+ phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+ } else {
+ nfc_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
pn544_hci_i2c_platform_init(phy);
@@ -930,8 +1029,12 @@ err_hci:
free_irq(client->irq, phy);
err_rti:
- if (pdata->free_resources != NULL)
+ if (!pdata) {
+ gpio_free(phy->gpio_en);
+ gpio_free(phy->gpio_fw);
+ } else if (pdata->free_resources) {
pdata->free_resources();
+ }
return r;
}
@@ -953,15 +1056,30 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
pn544_hci_i2c_disable(phy);
free_irq(client->irq, phy);
- if (pdata->free_resources)
+
+ /* No platform data, GPIOs have been requested by this driver */
+ if (!pdata) {
+ gpio_free(phy->gpio_en);
+ gpio_free(phy->gpio_fw);
+ /* Using platform data */
+ } else if (pdata->free_resources) {
pdata->free_resources();
+ }
return 0;
}
+static const struct of_device_id of_pn544_i2c_match[] = {
+ { .compatible = "nxp,pn544-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pn544_i2c_match);
+
static struct i2c_driver pn544_hci_i2c_driver = {
.driver = {
.name = PN544_HCI_I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_pn544_i2c_match),
},
.probe = pn544_hci_i2c_probe,
.id_table = pn544_hci_i2c_id_table,
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index b7a372af5eb7..4ac4d31f6c59 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -28,7 +28,8 @@
NFC_PROTO_MIFARE_MASK | \
NFC_PROTO_FELICA_MASK | \
NFC_PROTO_NFC_DEP_MASK | \
- NFC_PROTO_ISO14443_MASK)
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO14443_B_MASK)
#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
NFC_DIGITAL_DRV_CAPS_TG_CRC)
@@ -120,6 +121,7 @@ struct port100_in_rf_setting {
#define PORT100_COMM_TYPE_IN_212F 0x01
#define PORT100_COMM_TYPE_IN_424F 0x02
#define PORT100_COMM_TYPE_IN_106A 0x03
+#define PORT100_COMM_TYPE_IN_106B 0x07
static const struct port100_in_rf_setting in_rf_settings[] = {
[NFC_DIGITAL_RF_TECH_212F] = {
@@ -140,6 +142,12 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
.in_recv_set_number = 15,
.in_recv_comm_type = PORT100_COMM_TYPE_IN_106A,
},
+ [NFC_DIGITAL_RF_TECH_106B] = {
+ .in_send_set_number = 3,
+ .in_send_comm_type = PORT100_COMM_TYPE_IN_106B,
+ .in_recv_set_number = 15,
+ .in_recv_comm_type = PORT100_COMM_TYPE_IN_106B,
+ },
/* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
[NFC_DIGITAL_RF_TECH_LAST] = { 0 },
};
@@ -340,6 +348,32 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
{ PORT100_IN_PROT_END, 0 },
},
+ [NFC_DIGITAL_FRAMING_NFCB] = {
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 20 },
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 1 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 0 },
+ { PORT100_IN_PROT_CHECK_PARITY, 0 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 1 },
+ { PORT100_IN_PROT_CHECK_SOF, 1 },
+ { PORT100_IN_PROT_ADD_EOF, 1 },
+ { PORT100_IN_PROT_CHECK_EOF, 1 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
+ },
+ [NFC_DIGITAL_FRAMING_NFCB_T4T] = {
+ /* nfc_digital_framing_nfcb */
+ { PORT100_IN_PROT_END, 0 },
+ },
/* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
[NFC_DIGITAL_FRAMING_LAST] = {
{ PORT100_IN_PROT_END, 0 },
diff --git a/drivers/nfc/st21nfca/Kconfig b/drivers/nfc/st21nfca/Kconfig
new file mode 100644
index 000000000000..ee459f066ade
--- /dev/null
+++ b/drivers/nfc/st21nfca/Kconfig
@@ -0,0 +1,23 @@
+config NFC_ST21NFCA
+ tristate "STMicroelectronics ST21NFCA NFC driver"
+ depends on NFC_HCI
+ select CRC_CCITT
+ default n
+ ---help---
+ STMicroelectronics ST21NFCA core driver. It implements the chipset
+ HCI logic and hooks into the NFC kernel APIs. Physical layers will
+ register against it.
+
+ To compile this driver as a module, choose m here. The module will
+ be called st21nfca.
+ Say N if unsure.
+
+config NFC_ST21NFCA_I2C
+ tristate "NFC ST21NFCA i2c support"
+ depends on NFC_ST21NFCA && I2C && NFC_SHDLC
+ ---help---
+ This module adds support for the STMicroelectronics st21nfca i2c interface.
+ Select this if your platform is using the i2c bus.
+
+ If you choose to build a module, it'll be called st21nfca_i2c.
+ Say N if unsure.
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
new file mode 100644
index 000000000000..038ed093a119
--- /dev/null
+++ b/drivers/nfc/st21nfca/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for ST21NFCA HCI based NFC driver
+#
+
+st21nfca_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o
+obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
new file mode 100644
index 000000000000..3f954ed86d98
--- /dev/null
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -0,0 +1,724 @@
+/*
+ * I2C Link Layer for ST21NFCA HCI based Driver
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/firmware.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/platform_data/st21nfca.h>
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+#include <net/nfc/nfc.h>
+
+#include "st21nfca.h"
+
+/*
+ * Every frame starts with ST21NFCA_SOF_EOF and ends with ST21NFCA_SOF_EOF.
+ * Because ST21NFCA_SOF_EOF is a possible data value, a mechanism
+ * called byte stuffing has been introduced.
+ *
+ * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
+ * - insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
+ * - xor byte with ST21NFCA_BYTE_STUFFING_MASK
+ */
+#define ST21NFCA_SOF_EOF 0x7e
+#define ST21NFCA_BYTE_STUFFING_MASK 0x20
+#define ST21NFCA_ESCAPE_BYTE_STUFFING 0x7d
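+
+/*
+ * Worked example (illustrative): the payload byte 0x7e is escaped to
+ * 0x7d 0x5e (0x7e ^ 0x20) and 0x7d to 0x7d 0x5d (0x7d ^ 0x20); all
+ * other bytes are transmitted unchanged.
+ */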
+
+/* SOF + 00 */
+#define ST21NFCA_FRAME_HEADROOM 2
+
+/* 2 bytes crc + EOF */
+#define ST21NFCA_FRAME_TAILROOM 3
+#define IS_START_OF_FRAME(buf) (buf[0] == ST21NFCA_SOF_EOF && \
+ buf[1] == 0)
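+
+/*
+ * Resulting frame layout before byte stuffing, as implied by the
+ * headroom/tailroom definitions above:
+ *   7e | 00 | payload ... | crc-lo | crc-hi | 7e
+ */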
+
+#define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c"
+
+static struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
+ {ST21NFCA_HCI_DRIVER_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table);
+
+struct st21nfca_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct nfc_hci_dev *hdev;
+
+ unsigned int gpio_ena;
+ unsigned int gpio_irq;
+ unsigned int irq_polarity;
+
+ struct sk_buff *pending_skb;
+ int current_read_len;
+ /*
+ * crc might have failed because the i2c macrocell
+ * was disabled due to other interface activity
+ */
+ int crc_trials;
+
+ int powered;
+ int run_mode;
+
+ /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+ int hard_fault;
+ struct mutex phy_lock;
+};
+static u8 len_seq[] = { 13, 24, 15, 29 };
+static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
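+
+/* len_seq holds the successive i2c read lengths used to assemble one
+ * frame (see st21nfca_hci_i2c_read()); wait_tab holds the retry delays,
+ * in milliseconds, used while the chip's i2c interface is unavailable. */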
+
+#define I2C_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+/*
+ * In order to get the CLF in a known state we generate an internal reboot
+ * using a proprietary command.
+ * Once the reboot is completed, we expect to receive a buffer
+ * filled with ST21NFCA_SOF_EOF.
+ */
+static int st21nfca_hci_platform_init(struct st21nfca_i2c_phy *phy)
+{
+ u16 wait_reboot[] = { 50, 300, 1000 };
+ char reboot_cmd[] = { 0x7E, 0x66, 0x48, 0xF6, 0x7E };
+ u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE];
+ int i, r = -1;
+
+ for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) {
+ r = i2c_master_send(phy->i2c_dev, reboot_cmd,
+ sizeof(reboot_cmd));
+ if (r < 0)
+ msleep(wait_reboot[i]);
+ }
+ if (r < 0)
+ return r;
+
+ /* The CLF takes about 20ms to do an internal reboot */
+ msleep(20);
+ r = -1;
+ for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) {
+ r = i2c_master_recv(phy->i2c_dev, tmp,
+ ST21NFCA_HCI_LLC_MAX_SIZE);
+ if (r < 0)
+ msleep(wait_reboot[i]);
+ }
+ if (r < 0)
+ return r;
+
+ for (i = 0; i < ST21NFCA_HCI_LLC_MAX_SIZE &&
+ tmp[i] == ST21NFCA_SOF_EOF; i++)
+ ;
+
+ if (i != ST21NFCA_HCI_LLC_MAX_SIZE)
+ return -ENODEV;
+
+ usleep_range(1000, 1500);
+ return 0;
+}
+
+static int st21nfca_hci_i2c_enable(void *phy_id)
+{
+ struct st21nfca_i2c_phy *phy = phy_id;
+
+ gpio_set_value(phy->gpio_ena, 1);
+ phy->powered = 1;
+ phy->run_mode = ST21NFCA_HCI_MODE;
+
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static void st21nfca_hci_i2c_disable(void *phy_id)
+{
+ struct st21nfca_i2c_phy *phy = phy_id;
+
+ pr_info("\n");
+ gpio_set_value(phy->gpio_ena, 0);
+
+ phy->powered = 0;
+}
+
+static void st21nfca_hci_add_len_crc(struct sk_buff *skb)
+{
+ u16 crc;
+ u8 tmp;
+
+ *skb_push(skb, 1) = 0;
+
+ crc = crc_ccitt(0xffff, skb->data, skb->len);
+ crc = ~crc;
+
+ tmp = crc & 0x00ff;
+ *skb_put(skb, 1) = tmp;
+
+ tmp = (crc >> 8) & 0x00ff;
+ *skb_put(skb, 1) = tmp;
+}
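+
+/* Our reading of the construction above: the frame check sequence is
+ * crc_ccitt() seeded with 0xffff, then complemented, then appended
+ * least-significant byte first; check_crc() below verifies the same
+ * layout on receive. */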
+
+static void st21nfca_hci_remove_len_crc(struct sk_buff *skb)
+{
+ skb_pull(skb, ST21NFCA_FRAME_HEADROOM);
+ skb_trim(skb, skb->len - ST21NFCA_FRAME_TAILROOM);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb
+ */
+static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+ int r = -1, i, j;
+ struct st21nfca_i2c_phy *phy = phy_id;
+ struct i2c_client *client = phy->i2c_dev;
+ u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE * 2];
+
+ I2C_DUMP_SKB("st21nfca_hci_i2c_write", skb);
+
+
+ if (phy->hard_fault != 0)
+ return phy->hard_fault;
+
+ /*
+ * Compute CRC before byte stuffing computation on frame
+ * Note st21nfca_hci_add_len_crc is doing a byte stuffing
+ * on its own value
+ */
+ st21nfca_hci_add_len_crc(skb);
+
+ /* add ST21NFCA_SOF_EOF on tail */
+ *skb_put(skb, 1) = ST21NFCA_SOF_EOF;
+ /* add ST21NFCA_SOF_EOF on head */
+ *skb_push(skb, 1) = ST21NFCA_SOF_EOF;
+
+ /*
+ * Compute byte stuffing
+ * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
+ * insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
+ * xor byte with ST21NFCA_BYTE_STUFFING_MASK
+ */
+ tmp[0] = skb->data[0];
+ for (i = 1, j = 1; i < skb->len - 1; i++, j++) {
+ if (skb->data[i] == ST21NFCA_SOF_EOF
+ || skb->data[i] == ST21NFCA_ESCAPE_BYTE_STUFFING) {
+ tmp[j] = ST21NFCA_ESCAPE_BYTE_STUFFING;
+ j++;
+ tmp[j] = skb->data[i] ^ ST21NFCA_BYTE_STUFFING_MASK;
+ } else {
+ tmp[j] = skb->data[i];
+ }
+ }
+ tmp[j] = skb->data[i];
+ j++;
+
+ /*
+ * Manage sleep mode
+ * Try 3 times to send data with delay between each
+ */
+ mutex_lock(&phy->phy_lock);
+ for (i = 0; i < ARRAY_SIZE(wait_tab) && r < 0; i++) {
+ r = i2c_master_send(client, tmp, j);
+ if (r < 0)
+ msleep(wait_tab[i]);
+ }
+ mutex_unlock(&phy->phy_lock);
+
+ if (r >= 0) {
+ if (r != j)
+ r = -EREMOTEIO;
+ else
+ r = 0;
+ }
+
+ st21nfca_hci_remove_len_crc(skb);
+
+ return r;
+}
+
+static int get_frame_size(u8 *buf, int buflen)
+{
+ int len = 0;
+ if (buf[len + 1] == ST21NFCA_SOF_EOF)
+ return 0;
+
+ for (len = 1; len < buflen && buf[len] != ST21NFCA_SOF_EOF; len++)
+ ;
+
+ return len;
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+ u16 crc;
+
+ crc = crc_ccitt(0xffff, buf, buflen - 2);
+ crc = ~crc;
+
+ if (buf[buflen - 2] != (crc & 0xff) || buf[buflen - 1] != (crc >> 8)) {
+ pr_err(ST21NFCA_HCI_DRIVER_NAME
+ ": CRC error 0x%x != 0x%x 0x%x\n", crc, buf[buflen - 1],
+ buf[buflen - 2]);
+
+ pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+ print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+ 16, 2, buf, buflen, false);
+ return -EPERM;
+ }
+ return 0;
+}
+
+/*
+ * Prepare received data for upper layer.
+ * Received data include byte stuffing, crc and sof/eof
+ * which is not usable by hci part.
+ * returns:
+ * frame size without sof/eof, header and byte stuffing
+ * -EBADMSG : frame was incorrect and discarded
+ */
+static int st21nfca_hci_i2c_repack(struct sk_buff *skb)
+{
+ int i, j, r, size;
+ if (skb->len < 1 || (skb->len > 1 && skb->data[1] != 0))
+ return -EBADMSG;
+
+ size = get_frame_size(skb->data, skb->len);
+ if (size > 0) {
+ skb_trim(skb, size);
+ /* remove ST21NFCA byte stuffing for upper layer */
+ for (i = 1, j = 0; i < skb->len; i++) {
+ if (skb->data[i + j] ==
+ (u8) ST21NFCA_ESCAPE_BYTE_STUFFING) {
+ skb->data[i] = skb->data[i + j + 1]
+ | ST21NFCA_BYTE_STUFFING_MASK;
+ i++;
+ j++;
+ }
+ skb->data[i] = skb->data[i + j];
+ }
+ /* drop the extra bytes left over after unstuffing */
+ skb_trim(skb, i - j);
+ /* remove ST21NFCA_SOF_EOF from head */
+ skb_pull(skb, 1);
+
+ r = check_crc(skb->data, skb->len);
+ if (r != 0) {
+ i = 0;
+ return -EBADMSG;
+ }
+
+ /* remove headbyte */
+ skb_pull(skb, 1);
+ /* remove crc. Byte Stuffing is already removed here */
+ skb_trim(skb, skb->len - 2);
+ return skb->len;
+ }
+ return 0;
+}
+
+/*
+ * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
+ * that i2c bus will be flushed and that next read will start on a new frame.
+ * returned skb contains only LLC header and payload.
+ * returns:
+ * frame size : if received frame is complete (find ST21NFCA_SOF_EOF at
+ * end of read)
+ * -EAGAIN : if received frame is incomplete (not find ST21NFCA_SOF_EOF
+ * at end of read)
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * (value returned from st21nfca_hci_i2c_repack)
+ * -EIO : if no ST21NFCA_SOF_EOF is found after reaching
+ * the read length end sequence
+ */
+static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
+ struct sk_buff *skb)
+{
+ int r, i;
+ u8 len;
+ u8 buf[ST21NFCA_HCI_LLC_MAX_PAYLOAD];
+ struct i2c_client *client = phy->i2c_dev;
+
+ if (phy->current_read_len < ARRAY_SIZE(len_seq)) {
+ len = len_seq[phy->current_read_len];
+
+ /*
+ * Add retry mechanism:
+ * operations on the I2C interface may fail while the chip is
+ * busy on the RF or SWP interface
+ */
+ r = 0;
+ mutex_lock(&phy->phy_lock);
+ for (i = 0; i < ARRAY_SIZE(wait_tab) && r <= 0; i++) {
+ r = i2c_master_recv(client, buf, len);
+ if (r < 0)
+ msleep(wait_tab[i]);
+ }
+ mutex_unlock(&phy->phy_lock);
+
+ if (r != len) {
+ phy->current_read_len = 0;
+ return -EREMOTEIO;
+ }
+
+ /*
+ * The first read sequence does not start with SOF.
+ * Data is corrupted so we drop it.
+ */
+ if (!phy->current_read_len && buf[0] != ST21NFCA_SOF_EOF) {
+ skb_trim(skb, 0);
+ phy->current_read_len = 0;
+ return -EIO;
+ } else if (phy->current_read_len &&
+ IS_START_OF_FRAME(buf)) {
+ /*
+ * Previous frame transmission was interrupted and
+ * the frame got repeated.
+ * Received frame start with ST21NFCA_SOF_EOF + 00.
+ */
+ skb_trim(skb, 0);
+ phy->current_read_len = 0;
+ }
+
+ memcpy(skb_put(skb, len), buf, len);
+
+ if (skb->data[skb->len - 1] == ST21NFCA_SOF_EOF) {
+ phy->current_read_len = 0;
+ return st21nfca_hci_i2c_repack(skb);
+ }
+ phy->current_read_len++;
+ return -EAGAIN;
+ }
+ return -EIO;
+}
+
+/*
+ * Reads an shdlc frame from the chip. This is not as straightforward as it
+ * seems. The frame format is data-crc, and corruption can occur anywhere
+ * while transiting on i2c bus, such that we could read an invalid data.
+ * The tricky case is when we read a corrupted data or crc. We must detect
+ * this here in order to determine that data can be transmitted to the hci
+ * core. This is the reason why we check the crc here.
+ * The CLF will repeat a frame until we send a RR on that frame.
+ *
+ * On ST21NFCA, IRQ goes idle when a read starts. As no size information is
+ * available in the incoming data, further IRQs might come. Every IRQ will trigger
+ * a read sequence with different length and will fill the current frame.
+ * The reception is complete once we reach a ST21NFCA_SOF_EOF.
+ */
+static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
+{
+ struct st21nfca_i2c_phy *phy = phy_id;
+ struct i2c_client *client;
+
+ int r;
+
+ if (!phy || irq != phy->i2c_dev->irq) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (phy->hard_fault != 0)
+ return IRQ_HANDLED;
+
+ r = st21nfca_hci_i2c_read(phy, phy->pending_skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
+
+ nfc_hci_recv_frame(phy->hdev, NULL);
+
+ return IRQ_HANDLED;
+ } else if (r == -EAGAIN || r == -EIO) {
+ return IRQ_HANDLED;
+ } else if (r == -EBADMSG && phy->crc_trials < ARRAY_SIZE(wait_tab)) {
+ /*
+ * With ST21NFCA, only one interface (I2C, RF or SWP)
+ * may be active at a time.
+ * Having incorrect crc is usually due to i2c macrocell
+ * deactivation in the middle of a transmission.
+ * It may generate corrupted data on i2c.
+ * We give some time for i2c to come back.
+ * The complete frame will be repeated.
+ */
+ msleep(wait_tab[phy->crc_trials]);
+ phy->crc_trials++;
+ phy->current_read_len = 0;
+ kfree_skb(phy->pending_skb);
+ } else if (r > 0) {
+ /*
+ * We succeeded in reading data from the CLF and
+ * the data is valid.
+ * Reset counter.
+ */
+ nfc_hci_recv_frame(phy->hdev, phy->pending_skb);
+ phy->crc_trials = 0;
+ }
+
+ phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
+ if (phy->pending_skb == NULL) {
+ phy->hard_fault = -ENOMEM;
+ nfc_hci_recv_frame(phy->hdev, NULL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+ .write = st21nfca_hci_i2c_write,
+ .enable = st21nfca_hci_i2c_enable,
+ .disable = st21nfca_hci_i2c_disable,
+};
+
+#ifdef CONFIG_OF
+static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+ struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
+ struct device_node *pp;
+ int gpio;
+ int r;
+
+ pp = client->dev.of_node;
+ if (!pp)
+ return -ENODEV;
+
+ /* Get GPIO from device tree */
+ gpio = of_get_named_gpio(pp, "enable-gpios", 0);
+ if (gpio < 0) {
+ nfc_err(&client->dev, "Failed to retrieve enable-gpios from device tree\n");
+ return gpio;
+ }
+
+ /* GPIO request and configuration */
+ r = devm_gpio_request(&client->dev, gpio, "clf_enable");
+ if (r) {
+ nfc_err(&client->dev, "Failed to request enable pin\n");
+ return -ENODEV;
+ }
+
+ r = gpio_direction_output(gpio, 1);
+ if (r) {
+ nfc_err(&client->dev, "Failed to set enable pin direction as output\n");
+ return -ENODEV;
+ }
+ phy->gpio_ena = gpio;
+
+ /* IRQ */
+ r = irq_of_parse_and_map(pp, 0);
+ if (r < 0) {
+ nfc_err(&client->dev,
+ "Unable to get irq, error: %d\n", r);
+ return r;
+ }
+
+ phy->irq_polarity = irq_get_trigger_type(r);
+ client->irq = r;
+
+ return 0;
+}
+#else
+static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+ return -ENODEV;
+}
+#endif
+
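+/*
+ * Illustrative device tree node for the OF path above. The compatible
+ * string and GPIO property name come from this file; all other values
+ * are placeholders only:
+ *
+ *	nfc@1 {
+ *		compatible = "st,st21nfca_i2c";
+ *		reg = <0x01>;
+ *		interrupt-parent = <&gpio5>;
+ *		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ *		enable-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+ *	};
+ */
+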
+static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
+{
+ struct st21nfca_nfc_platform_data *pdata;
+ struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
+ int r;
+ int irq;
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL) {
+ nfc_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ /* store for later use */
+ phy->gpio_irq = pdata->gpio_irq;
+ phy->gpio_ena = pdata->gpio_ena;
+ phy->irq_polarity = pdata->irq_polarity;
+
+ r = devm_gpio_request(&client->dev, phy->gpio_irq, "wake_up");
+ if (r) {
+ pr_err("%s : gpio_request failed\n", __FILE__);
+ return -ENODEV;
+ }
+
+ r = gpio_direction_input(phy->gpio_irq);
+ if (r) {
+ pr_err("%s : gpio_direction_input failed\n", __FILE__);
+ return -ENODEV;
+ }
+
+ if (phy->gpio_ena > 0) {
+ r = devm_gpio_request(&client->dev,
+ phy->gpio_ena, "clf_enable");
+ if (r) {
+ pr_err("%s : ena gpio_request failed\n", __FILE__);
+ return -ENODEV;
+ }
+ r = gpio_direction_output(phy->gpio_ena, 1);
+
+ if (r) {
+ pr_err("%s : ena gpio_direction_output failed\n",
+ __FILE__);
+ return -ENODEV;
+ }
+ }
+
+ /* IRQ */
+ irq = gpio_to_irq(phy->gpio_irq);
+ if (irq < 0) {
+ nfc_err(&client->dev,
+ "Unable to get irq number for GPIO %d error %d\n",
+ phy->gpio_irq, r);
+ return -ENODEV;
+ }
+ client->irq = irq;
+
+ return 0;
+}
+
+static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct st21nfca_i2c_phy *phy;
+ struct st21nfca_nfc_platform_data *pdata;
+ int r;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct st21nfca_i2c_phy),
+ GFP_KERNEL);
+ if (!phy) {
+ nfc_err(&client->dev,
+ "Cannot allocate memory for st21nfca i2c phy.\n");
+ return -ENOMEM;
+ }
+
+ phy->i2c_dev = client;
+ phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
+ if (phy->pending_skb == NULL)
+ return -ENOMEM;
+
+ phy->current_read_len = 0;
+ phy->crc_trials = 0;
+ mutex_init(&phy->phy_lock);
+ i2c_set_clientdata(client, phy);
+
+ pdata = client->dev.platform_data;
+ if (!pdata && client->dev.of_node) {
+ r = st21nfca_hci_i2c_of_request_resources(client);
+ if (r) {
+ nfc_err(&client->dev, "No platform data\n");
+ return r;
+ }
+ } else if (pdata) {
+ r = st21nfca_hci_i2c_request_resources(client);
+ if (r) {
+ nfc_err(&client->dev, "Cannot get platform resources\n");
+ return r;
+ }
+ } else {
+ nfc_err(&client->dev, "st21nfca platform resources not available\n");
+ return -ENODEV;
+ }
+
+ r = st21nfca_hci_platform_init(phy);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to reboot st21nfca\n");
+ return -ENODEV;
+ }
+
+ r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ st21nfca_hci_irq_thread_fn,
+ phy->irq_polarity | IRQF_ONESHOT,
+ ST21NFCA_HCI_DRIVER_NAME, phy);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
+ return r;
+ }
+
+ return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+ ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM,
+ ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev);
+}
+
+static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+{
+ struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ st21nfca_hci_remove(phy->hdev);
+
+ if (phy->powered)
+ st21nfca_hci_i2c_disable(phy);
+
+ return 0;
+}
+
+static const struct of_device_id of_st21nfca_i2c_match[] = {
+ { .compatible = "st,st21nfca_i2c", },
+ {}
+};
+
+static struct i2c_driver st21nfca_hci_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = ST21NFCA_HCI_I2C_DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_st21nfca_i2c_match),
+ },
+ .probe = st21nfca_hci_i2c_probe,
+ .id_table = st21nfca_hci_i2c_id_table,
+ .remove = st21nfca_hci_i2c_remove,
+};
+
+module_i2c_driver(st21nfca_hci_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
new file mode 100644
index 000000000000..51e0f00b3a4f
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -0,0 +1,698 @@
+/*
+ * HCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "st21nfca.h"
+
+#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
+
+#define FULL_VERSION_LEN 3
+
+/* Proprietary gates, events, commands and registers */
+
+/* Commands that apply to all RF readers */
+#define ST21NFCA_RF_READER_CMD_PRESENCE_CHECK 0x30
+
+#define ST21NFCA_RF_READER_ISO15693_GATE 0x12
+#define ST21NFCA_RF_READER_ISO15693_INVENTORY 0x01
+
+/*
+ * Reader gate for communication with contact-less cards using Type A
+ * protocol ISO14443-3 but not compliant with ISO14443-4
+ */
+#define ST21NFCA_RF_READER_14443_3_A_GATE 0x15
+#define ST21NFCA_RF_READER_14443_3_A_UID 0x02
+#define ST21NFCA_RF_READER_14443_3_A_ATQA 0x03
+#define ST21NFCA_RF_READER_14443_3_A_SAK 0x04
+
+#define ST21NFCA_DEVICE_MGNT_GATE 0x01
+#define ST21NFCA_DEVICE_MGNT_PIPE 0x02
+
+#define ST21NFCA_DM_GETINFO 0x13
+#define ST21NFCA_DM_GETINFO_PIPE_LIST 0x02
+#define ST21NFCA_DM_GETINFO_PIPE_INFO 0x01
+#define ST21NFCA_DM_PIPE_CREATED 0x02
+#define ST21NFCA_DM_PIPE_OPEN 0x04
+#define ST21NFCA_DM_RF_ACTIVE 0x80
+
+#define ST21NFCA_DM_IS_PIPE_OPEN(p) \
+ ((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN))
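+/* i.e. the low nibble of pipe_state must be exactly 0x06 (created | open) */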
+
+#define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/
+
+static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
+
+static struct nfc_hci_gate st21nfca_gates[] = {
+ {NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE},
+ {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
+ {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
+ {ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
+ {ST21NFCA_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
+ {ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
+ {ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
+};
+
+struct st21nfca_pipe_info {
+ u8 pipe_state;
+ u8 src_host_id;
+ u8 src_gate_id;
+ u8 dst_host_id;
+ u8 dst_gate_id;
+} __packed;
+
+/* Largest headroom needed for outgoing custom commands */
+#define ST21NFCA_CMDS_HEADROOM 7
+
+static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+{
+ int i, j, r;
+ struct sk_buff *skb_pipe_list, *skb_pipe_info;
+ struct st21nfca_pipe_info *info;
+
+ u8 pipe_list[] = { ST21NFCA_DM_GETINFO_PIPE_LIST,
+ NFC_HCI_TERMINAL_HOST_ID
+ };
+ u8 pipe_info[] = { ST21NFCA_DM_GETINFO_PIPE_INFO,
+ NFC_HCI_TERMINAL_HOST_ID, 0
+ };
+
+ skb_pipe_list = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
+ if (!skb_pipe_list) {
+ r = -ENOMEM;
+ goto free_list;
+ }
+
+ skb_pipe_info = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
+ if (!skb_pipe_info) {
+ r = -ENOMEM;
+ goto free_info;
+ }
+
+ /* On the ST21NFCA device, pipe numbers are dynamic.
+ * A maximum of 16 pipes can be created at the same time.
+ * If pipes are already created, hci_dev_up will fail.
+ * Doing a clear-all-pipes is a bad idea because:
+ * - It does useless EEPROM cycling
+ * - It might cause issue for secure elements support
+ * (such as removing connectivity or APDU reader pipe)
+ * A better approach on ST21NFCA is to:
+ * - get a pipe list for each host.
+ * (eg: NFC_HCI_HOST_CONTROLLER_ID for now).
+ * (TODO Later on UICC HOST and eSE HOST)
+ * - get pipe information
+ * - match retrieved pipe list in st21nfca_gates
+ * ST21NFCA_DEVICE_MGNT_GATE is a proprietary gate
+ * with ST21NFCA_DEVICE_MGNT_PIPE.
+ * Pipes can be closed and need to be opened.
+ */
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+ ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE);
+ if (r < 0)
+ goto free_info;
+
+ /* Get pipe list */
+ r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
+ &skb_pipe_list);
+ if (r < 0)
+ goto free_info;
+
+ /* Complete the existing gate_pipe table */
+ for (i = 0; i < skb_pipe_list->len; i++) {
+ pipe_info[2] = skb_pipe_list->data[i];
+ r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_DM_GETINFO, pipe_info,
+ sizeof(pipe_info), &skb_pipe_info);
+
+ if (r)
+ continue;
+
+ /*
+ * Match pipe ID and gate ID
+ * Output format from ST21NFC_DM_GETINFO is:
+ * - pipe state (1byte)
+ * - source hid (1byte)
+ * - source gid (1byte)
+ * - destination hid (1byte)
+ * - destination gid (1byte)
+ */
+ info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
+ for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) &&
+ (st21nfca_gates[j].gate != info->dst_gate_id);
+ j++)
+ ;
+
+ if (j < ARRAY_SIZE(st21nfca_gates) &&
+ st21nfca_gates[j].gate == info->dst_gate_id &&
+ ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) {
+ st21nfca_gates[j].pipe = pipe_info[2];
+ hdev->gate2pipe[st21nfca_gates[j].gate] =
+ st21nfca_gates[j].pipe;
+ }
+ }
+
+ /*
+ * 3 gates have a well known pipe ID.
+ * They will never appear in the pipe list
+ */
+ if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) {
+ for (i = skb_pipe_list->len + 3;
+ i < ARRAY_SIZE(st21nfca_gates); i++) {
+ r = nfc_hci_connect_gate(hdev,
+ NFC_HCI_HOST_CONTROLLER_ID,
+ st21nfca_gates[i].gate,
+ st21nfca_gates[i].pipe);
+ if (r < 0)
+ goto free_info;
+ }
+ }
+
+ memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
+free_info:
+ kfree_skb(skb_pipe_info);
+free_list:
+ kfree_skb(skb_pipe_list);
+ return r;
+}
+
+static int st21nfca_hci_open(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+ int r;
+
+ mutex_lock(&info->info_lock);
+
+ if (info->state != ST21NFCA_ST_COLD) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ r = info->phy_ops->enable(info->phy_id);
+
+ if (r == 0)
+ info->state = ST21NFCA_ST_READY;
+
+out:
+ mutex_unlock(&info->info_lock);
+ return r;
+}
+
+static void st21nfca_hci_close(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ mutex_lock(&info->info_lock);
+
+ if (info->state == ST21NFCA_ST_COLD)
+ goto out;
+
+ info->phy_ops->disable(info->phy_id);
+ info->state = ST21NFCA_ST_COLD;
+
+out:
+ mutex_unlock(&info->info_lock);
+}
+
+static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ u8 param;
+ int r;
+
+ param = NFC_HCI_UICC_HOST_ID;
+ r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_WHITELIST, &param, 1);
+ if (r < 0)
+ return r;
+
+ /* Set NFC_MODE in device management gate to enable */
+ r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_NFC_MODE, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->data[0] == 0) {
+ kfree_skb(skb);
+ param = 1;
+
+ r = nfc_hci_set_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ ST21NFCA_NFC_MODE, &param, 1);
+ if (r < 0)
+ return r;
+ }
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ NFC_HCI_ID_MGMT_VERSION_SW, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->len != FULL_VERSION_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ skb->data, FULL_VERSION_LEN, false);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int st21nfca_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ return info->phy_ops->write(info->phy_id, skb);
+}
+
+static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols)
+{
+ int r;
+
+ pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
+ __func__, im_protocols, tm_protocols);
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+ if (im_protocols) {
+ /*
+ * Enable polling according to im_protocols: close the
+ * pipe of every reader gate that was not requested.
+ */
+ if ((NFC_HCI_RF_READER_B_GATE & im_protocols) == 0) {
+ r = nfc_hci_disconnect_gate(hdev,
+ NFC_HCI_RF_READER_B_GATE);
+ if (r < 0)
+ return r;
+ }
+
+ if ((NFC_HCI_RF_READER_A_GATE & im_protocols) == 0) {
+ r = nfc_hci_disconnect_gate(hdev,
+ NFC_HCI_RF_READER_A_GATE);
+ if (r < 0)
+ return r;
+ }
+
+ if ((ST21NFCA_RF_READER_F_GATE & im_protocols) == 0) {
+ r = nfc_hci_disconnect_gate(hdev,
+ ST21NFCA_RF_READER_F_GATE);
+ if (r < 0)
+ return r;
+ }
+
+ if ((ST21NFCA_RF_READER_14443_3_A_GATE & im_protocols) == 0) {
+ r = nfc_hci_disconnect_gate(hdev,
+ ST21NFCA_RF_READER_14443_3_A_GATE);
+ if (r < 0)
+ return r;
+ }
+
+ if ((ST21NFCA_RF_READER_ISO15693_GATE & im_protocols) == 0) {
+ r = nfc_hci_disconnect_gate(hdev,
+ ST21NFCA_RF_READER_ISO15693_GATE);
+ if (r < 0)
+ return r;
+ }
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+ if (r < 0)
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ }
+ return r;
+}
+
+static int st21nfca_get_iso14443_3_atqa(struct nfc_hci_dev *hdev, u16 *atqa)
+{
+ int r;
+ struct sk_buff *atqa_skb = NULL;
+
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
+ ST21NFCA_RF_READER_14443_3_A_ATQA, &atqa_skb);
+ if (r < 0)
+ goto exit;
+
+ if (atqa_skb->len != 2) {
+ r = -EPROTO;
+ goto exit;
+ }
+
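+ /*
+ * The ATQA arrives most significant byte first: e.g. the bytes
+ * 0x00 0x0c become 0x000c, the Jewel/Topaz value checked in
+ * st21nfca_hci_target_from_gate().
+ */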
+ *atqa = be16_to_cpu(*(__be16 *) atqa_skb->data);
+
+exit:
+ kfree_skb(atqa_skb);
+ return r;
+}
+
+static int st21nfca_get_iso14443_3_sak(struct nfc_hci_dev *hdev, u8 *sak)
+{
+ int r;
+ struct sk_buff *sak_skb = NULL;
+
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
+ ST21NFCA_RF_READER_14443_3_A_SAK, &sak_skb);
+ if (r < 0)
+ goto exit;
+
+ if (sak_skb->len != 1) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ *sak = sak_skb->data[0];
+
+exit:
+ kfree_skb(sak_skb);
+ return r;
+}
+
+static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *uid,
+ int *len)
+{
+ int r;
+ struct sk_buff *uid_skb = NULL;
+
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
+ ST21NFCA_RF_READER_14443_3_A_UID, &uid_skb);
+ if (r < 0)
+ goto exit;
+
+ if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ memcpy(uid, uid_skb->data, uid_skb->len);
+ *len = uid_skb->len;
+exit:
+ kfree_skb(uid_skb);
+ return r;
+}
+
+static int st21nfca_get_iso15693_inventory(struct nfc_hci_dev *hdev,
+ struct nfc_target *target)
+{
+ int r;
+ struct sk_buff *inventory_skb = NULL;
+
+ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_ISO15693_GATE,
+ ST21NFCA_RF_READER_ISO15693_INVENTORY,
+ &inventory_skb);
+ if (r < 0)
+ goto exit;
+
+ skb_pull(inventory_skb, 2);
+
+ if (inventory_skb->len == 0 ||
+ inventory_skb->len > NFC_ISO15693_UID_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ memcpy(target->iso15693_uid, inventory_skb->data, inventory_skb->len);
+ target->iso15693_dsfid = inventory_skb->data[1];
+ target->is_iso15693 = 1;
+exit:
+ kfree_skb(inventory_skb);
+ return r;
+}
+
+static int st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target)
+{
+ int r, len;
+ u16 atqa;
+ u8 sak;
+ u8 uid[NFC_NFCID1_MAXSIZE];
+
+ switch (gate) {
+ case ST21NFCA_RF_READER_F_GATE:
+ target->supported_protocols = NFC_PROTO_FELICA_MASK;
+ break;
+ case ST21NFCA_RF_READER_14443_3_A_GATE:
+ /* ISO14443-3 type 1 or 2 tags */
+ r = st21nfca_get_iso14443_3_atqa(hdev, &atqa);
+ if (r < 0)
+ return r;
+ if (atqa == 0x000c) {
+ target->supported_protocols = NFC_PROTO_JEWEL_MASK;
+ target->sens_res = 0x0c00;
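+ /*
+ * The ATQA was byte-swapped to 0x000c above; sens_res is
+ * reported as 0x0c00, presumably the same value in the byte
+ * order the NFC core expects for Jewel/Topaz tags.
+ */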
+ } else {
+ r = st21nfca_get_iso14443_3_sak(hdev, &sak);
+ if (r < 0)
+ return r;
+
+ r = st21nfca_get_iso14443_3_uid(hdev, uid, &len);
+ if (r < 0)
+ return r;
+
+ target->supported_protocols =
+ nfc_hci_sak_to_protocol(sak);
+ if (target->supported_protocols == 0xffffffff)
+ return -EPROTO;
+
+ target->sens_res = atqa;
+ target->sel_res = sak;
+ memcpy(target->nfcid1, uid, len);
+ target->nfcid1_len = len;
+ }
+
+ break;
+ case ST21NFCA_RF_READER_ISO15693_GATE:
+ target->supported_protocols = NFC_PROTO_ISO15693_MASK;
+ r = st21nfca_get_iso15693_inventory(hdev, target);
+ if (r < 0)
+ return r;
+ break;
+ default:
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+#define ST21NFCA_CB_TYPE_READER_ISO15693 1
+static void st21nfca_hci_data_exchange_cb(void *context, struct sk_buff *skb,
+ int err)
+{
+ struct st21nfca_hci_info *info = context;
+
+ switch (info->async_cb_type) {
+ case ST21NFCA_CB_TYPE_READER_ISO15693:
+ if (err == 0)
+ skb_trim(skb, skb->len - 1);
+ info->async_cb(info->async_cb_context, skb, err);
+ break;
+ default:
+ if (err == 0)
+ kfree_skb(skb);
+ break;
+ }
+}
+
+/*
+ * Returns:
+ * <= 0: the driver handled the data exchange
+ * 1: the driver doesn't handle it specially; do the standard processing
+ */
+static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev,
+ struct nfc_target *target,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ pr_info(DRIVER_DESC ": %s for gate=%d len=%d\n", __func__,
+ target->hci_reader_gate, skb->len);
+
+ switch (target->hci_reader_gate) {
+ case ST21NFCA_RF_READER_F_GATE:
+ *skb_push(skb, 1) = 0x1a;
+ return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+ ST21NFCA_WR_XCHG_DATA, skb->data,
+ skb->len, cb, cb_context);
+ case ST21NFCA_RF_READER_14443_3_A_GATE:
+ *skb_push(skb, 1) = 0x1a; /* CTR, see spec:10.2.2.1 */
+
+ return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+ ST21NFCA_WR_XCHG_DATA, skb->data,
+ skb->len, cb, cb_context);
+ case ST21NFCA_RF_READER_ISO15693_GATE:
+ info->async_cb_type = ST21NFCA_CB_TYPE_READER_ISO15693;
+ info->async_cb = cb;
+ info->async_cb_context = cb_context;
+
+ *skb_push(skb, 1) = 0x17;
+
+ return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+ ST21NFCA_WR_XCHG_DATA, skb->data,
+ skb->len,
+ st21nfca_hci_data_exchange_cb,
+ info);
+ default:
+ return 1;
+ }
+}
+
+static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
+ struct nfc_target *target)
+{
+ u8 fwi = 0x11;
+
+ switch (target->hci_reader_gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ case NFC_HCI_RF_READER_B_GATE:
+ /*
+ * PRESENCE_CHECK is available on those gates.
+ * However, the answer to this command takes 3 * fwi
+ * if the card is not present.
+ * Instead, we send an empty I-Frame with a very short
+ * configurable fwi of ~604µs.
+ */
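+ /*
+ * For reference, assuming the standard ISO14443-4 formula
+ * FWT = (4096 / fc) * 2^FWI ~= 302us * 2^FWI:
+ * an FWI of 1 gives the ~604us mentioned above.
+ */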
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ ST21NFCA_WR_XCHG_DATA, &fwi, 1, NULL);
+ case ST21NFCA_RF_READER_14443_3_A_GATE:
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ ST21NFCA_RF_READER_CMD_PRESENCE_CHECK,
+ NULL, 0, NULL);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct nfc_hci_ops st21nfca_hci_ops = {
+ .open = st21nfca_hci_open,
+ .close = st21nfca_hci_close,
+ .load_session = st21nfca_hci_load_session,
+ .hci_ready = st21nfca_hci_ready,
+ .xmit = st21nfca_hci_xmit,
+ .start_poll = st21nfca_hci_start_poll,
+ .target_from_gate = st21nfca_hci_target_from_gate,
+ .im_transceive = st21nfca_hci_im_transceive,
+ .check_presence = st21nfca_hci_check_presence,
+};
+
+int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+ char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, struct nfc_hci_dev **hdev)
+{
+ struct st21nfca_hci_info *info;
+ int r = 0;
+ int dev_num;
+ u32 protocols;
+ struct nfc_hci_init_data init_data;
+ unsigned long quirks = 0;
+
+ info = kzalloc(sizeof(struct st21nfca_hci_info), GFP_KERNEL);
+ if (!info) {
+ r = -ENOMEM;
+ goto err_alloc_hdev;
+ }
+
+ info->phy_ops = phy_ops;
+ info->phy_id = phy_id;
+ info->state = ST21NFCA_ST_COLD;
+ mutex_init(&info->info_lock);
+
+ init_data.gate_count = ARRAY_SIZE(st21nfca_gates);
+
+ memcpy(init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
+
+ /*
+ * The session id must include persistent info (driver name +
+ * i2c bus address) to discriminate between two identical chips.
+ */
+ dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES);
+ if (dev_num >= ST21NFCA_NUM_DEVICES) {
+ r = -ENODEV;
+ goto err_alloc_hdev;
+ }
+
+ scnprintf(init_data.session_id, sizeof(init_data.session_id), "%s%2x",
+ "ST21AH", dev_num);
+
+ protocols = NFC_PROTO_JEWEL_MASK |
+ NFC_PROTO_MIFARE_MASK |
+ NFC_PROTO_FELICA_MASK |
+ NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_ISO14443_B_MASK |
+ NFC_PROTO_ISO15693_MASK;
+
+ set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks);
+
+ info->hdev =
+ nfc_hci_allocate_device(&st21nfca_hci_ops, &init_data, quirks,
+ protocols, llc_name,
+ phy_headroom + ST21NFCA_CMDS_HEADROOM,
+ phy_tailroom, phy_payload);
+
+ if (!info->hdev) {
+ pr_err("Cannot allocate nfc hdev.\n");
+ r = -ENOMEM;
+ goto err_alloc_hdev;
+ }
+
+ nfc_hci_set_clientdata(info->hdev, info);
+
+ r = nfc_hci_register_device(info->hdev);
+ if (r)
+ goto err_regdev;
+
+ *hdev = info->hdev;
+
+ return 0;
+
+err_regdev:
+ nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
+ kfree(info);
+
+ return r;
+}
+EXPORT_SYMBOL(st21nfca_hci_probe);
+
+void st21nfca_hci_remove(struct nfc_hci_dev *hdev)
+{
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ nfc_hci_unregister_device(hdev);
+ nfc_hci_free_device(hdev);
+ kfree(info);
+}
+EXPORT_SYMBOL(st21nfca_hci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
new file mode 100644
index 000000000000..334cd90bcc8c
--- /dev/null
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_ST21NFCA_H_
+#define __LOCAL_ST21NFCA_H_
+
+#include <net/nfc/hci.h>
+
+#define HCI_MODE 0
+
+/* framing in HCI mode */
+#define ST21NFCA_SOF_EOF_LEN 2
+
+/* 1-byte length field; its value is almost always 0 */
+#define ST21NFCA_HCI_LLC_LEN 1
+
+/* Worst-case size:
+ * the CRC is normally 2 bytes, but byte stuffing doubles it
+ * when a CRC byte equals ST21NFCA_SOF_EOF
+ */
+#define ST21NFCA_HCI_LLC_CRC 4
+
+#define ST21NFCA_HCI_LLC_LEN_CRC (ST21NFCA_SOF_EOF_LEN + \
+ ST21NFCA_HCI_LLC_LEN + \
+ ST21NFCA_HCI_LLC_CRC)
+#define ST21NFCA_HCI_LLC_MIN_SIZE (1 + ST21NFCA_HCI_LLC_LEN_CRC)
+
+/* Worst case when adding byte stuffing between each byte */
+#define ST21NFCA_HCI_LLC_MAX_PAYLOAD 29
+#define ST21NFCA_HCI_LLC_MAX_SIZE (ST21NFCA_HCI_LLC_LEN_CRC + 1 + \
+ ST21NFCA_HCI_LLC_MAX_PAYLOAD)
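+
+/*
+ * Expanded: ST21NFCA_HCI_LLC_LEN_CRC = 2 + 1 + 4 = 7 bytes,
+ * ST21NFCA_HCI_LLC_MIN_SIZE = 1 + 7 = 8 bytes and
+ * ST21NFCA_HCI_LLC_MAX_SIZE = 7 + 1 + 29 = 37 bytes.
+ */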
+
+#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
+
+#define ST21NFCA_HCI_MODE 0
+
+#define ST21NFCA_NUM_DEVICES 256
+
+int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+ char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, struct nfc_hci_dev **hdev);
+void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
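+
+/*
+ * Typical usage from a physical-layer driver (a sketch only: the
+ * phy_ops name, the head/tail room values and the hdev field below
+ * are illustrative, not taken from the actual phy driver):
+ *
+ *	r = st21nfca_hci_probe(phy, &my_phy_ops, LLC_SHDLC_NAME,
+ *			       headroom, tailroom,
+ *			       ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev);
+ */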
+
+enum st21nfca_state {
+ ST21NFCA_ST_COLD,
+ ST21NFCA_ST_READY,
+};
+
+struct st21nfca_hci_info {
+ struct nfc_phy_ops *phy_ops;
+ void *phy_id;
+
+ struct nfc_hci_dev *hdev;
+
+ enum st21nfca_state state;
+
+ struct mutex info_lock;
+
+ int async_cb_type;
+ data_exchange_cb_t async_cb;
+ void *async_cb_context;
+
+};
+
+/* Reader RF commands */
+#define ST21NFCA_WR_XCHG_DATA 0x10
+
+#define ST21NFCA_RF_READER_F_GATE 0x14
+#define ST21NFCA_RF_READER_F_DATARATE 0x01
+#define ST21NFCA_RF_READER_F_DATARATE_106 0x01
+#define ST21NFCA_RF_READER_F_DATARATE_212 0x02
+#define ST21NFCA_RF_READER_F_DATARATE_424 0x04
+
+#endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index d9babe986473..3b78b031e617 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/nfc.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
@@ -67,14 +68,14 @@
* only the SRX bit set, it means that all of the data has been received
* (once what's in the fifo has been read). However, depending on timing
* an interrupt status with only the SRX bit set may not be received. In
- * those cases, the timeout mechanism is used to wait 5 ms in case more
- * data arrives. After 5 ms, it is assumed that all of the data has been
+ * those cases, the timeout mechanism is used to wait 20 ms in case more
+ * data arrives. After 20 ms, it is assumed that all of the data has been
* received and the accumulated rx data is sent upstream. The
* 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
* (i.e., it indicates that some data has been received but we're not sure
* if there is more coming so a timeout in this state means all data has
- * been received and there isn't an error). The delay is 5 ms since delays
- * over 2 ms have been observed during testing (a little extra just in case).
+ * been received and there isn't an error). The delay is 20 ms since delays
+ * of ~16 ms have been observed during testing.
*
* Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
* Having only 4 bits in the FIFO won't normally generate an interrupt so
@@ -104,8 +105,11 @@
#define TRF7970A_SUPPORTED_PROTOCOLS \
(NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_FELICA_MASK | \
NFC_PROTO_ISO15693_MASK)
+#define TRF7970A_AUTOSUSPEND_DELAY 30000 /* 30 seconds */
+
/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
* on what the current framing is, the address of the TX length byte 1
* register (0x1d), and the 2 byte length of the data to be transmitted.
@@ -120,7 +124,7 @@
/* TX length is 3 nibbles long ==> 4KB - 1 bytes max */
#define TRF7970A_TX_MAX (4096 - 1)
-#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 5
+#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 20
#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 3
#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 20
@@ -330,13 +334,15 @@ struct trf7970a {
struct regulator *regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
- bool powering_up;
bool aborting;
struct sk_buff *tx_skb;
struct sk_buff *rx_skb;
nfc_digital_cmd_complete_t cb;
void *cb_arg;
+ u8 chip_status_ctrl;
u8 iso_ctrl;
+ u8 iso_ctrl_tech;
+ u8 modulator_sys_clk_ctrl;
u8 special_fcn_reg1;
int technology;
int framing;
@@ -681,7 +687,9 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id)
trf->ignore_timeout =
!cancel_delayed_work(&trf->timeout_work);
trf7970a_drain_fifo(trf, status);
- } else if (!(status & TRF7970A_IRQ_STATUS_TX)) {
+ } else if (status == TRF7970A_IRQ_STATUS_TX) {
+ trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ } else {
trf7970a_send_err_upstream(trf, -EIO);
}
break;
@@ -757,8 +765,8 @@ static int trf7970a_init(struct trf7970a *trf)
if (ret)
goto err_out;
- ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
- TRF7970A_MODULATOR_DEPTH_OOK);
+ /* Must clear NFC Target Detection Level reg due to erratum */
+ ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0);
if (ret)
goto err_out;
@@ -774,12 +782,7 @@ static int trf7970a_init(struct trf7970a *trf)
trf->special_fcn_reg1 = 0;
- ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
- TRF7970A_CHIP_STATUS_RF_ON |
- TRF7970A_CHIP_STATUS_VRS5_3);
- if (ret)
- goto err_out;
-
+ trf->iso_ctrl = 0xff;
return 0;
err_out:
@@ -791,53 +794,29 @@ static void trf7970a_switch_rf_off(struct trf7970a *trf)
{
dev_dbg(trf->dev, "Switching rf off\n");
- gpio_set_value(trf->en_gpio, 0);
- gpio_set_value(trf->en2_gpio, 0);
+ trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON;
+
+ trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl);
trf->aborting = false;
trf->state = TRF7970A_ST_OFF;
+
+ pm_runtime_mark_last_busy(trf->dev);
+ pm_runtime_put_autosuspend(trf->dev);
}
-static int trf7970a_switch_rf_on(struct trf7970a *trf)
+static void trf7970a_switch_rf_on(struct trf7970a *trf)
{
- unsigned long delay;
- int ret;
-
dev_dbg(trf->dev, "Switching rf on\n");
- if (trf->powering_up)
- usleep_range(5000, 6000);
-
- gpio_set_value(trf->en2_gpio, 1);
- usleep_range(1000, 2000);
- gpio_set_value(trf->en_gpio, 1);
+ pm_runtime_get_sync(trf->dev);
- /* The delay between enabling the trf7970a and issuing the first
- * command is significantly longer the very first time after powering
- * up. Make sure the longer delay is only done the first time.
- */
- if (trf->powering_up) {
- delay = 20000;
- trf->powering_up = false;
- } else {
- delay = 5000;
- }
-
- usleep_range(delay, delay + 1000);
-
- ret = trf7970a_init(trf);
- if (ret)
- trf7970a_switch_rf_off(trf);
- else
- trf->state = TRF7970A_ST_IDLE;
-
- return ret;
+ trf->state = TRF7970A_ST_IDLE;
}
static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
- int ret = 0;
dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
@@ -846,7 +825,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
if (on) {
switch (trf->state) {
case TRF7970A_ST_OFF:
- ret = trf7970a_switch_rf_on(trf);
+ trf7970a_switch_rf_on(trf);
break;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
@@ -871,7 +850,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
}
mutex_unlock(&trf->lock);
- return ret;
+ return 0;
}
static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
@@ -882,10 +861,24 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
switch (tech) {
case NFC_DIGITAL_RF_TECH_106A:
- trf->iso_ctrl = TRF7970A_ISO_CTRL_14443A_106;
+ trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106;
+ trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK;
+ break;
+ case NFC_DIGITAL_RF_TECH_106B:
+ trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106;
+ trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
+ break;
+ case NFC_DIGITAL_RF_TECH_212F:
+ trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_212;
+ trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
+ break;
+ case NFC_DIGITAL_RF_TECH_424F:
+ trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_424;
+ trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
break;
case NFC_DIGITAL_RF_TECH_ISO15693:
- trf->iso_ctrl = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+ trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+ trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK;
break;
default:
dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
@@ -899,24 +892,31 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
static int trf7970a_config_framing(struct trf7970a *trf, int framing)
{
+ u8 iso_ctrl = trf->iso_ctrl_tech;
+ int ret;
+
dev_dbg(trf->dev, "framing: %d\n", framing);
switch (framing) {
case NFC_DIGITAL_FRAMING_NFCA_SHORT:
case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
- trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+ iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
break;
case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
case NFC_DIGITAL_FRAMING_NFCA_T4T:
+ case NFC_DIGITAL_FRAMING_NFCB:
+ case NFC_DIGITAL_FRAMING_NFCB_T4T:
+ case NFC_DIGITAL_FRAMING_NFCF:
+ case NFC_DIGITAL_FRAMING_NFCF_T3T:
case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
case NFC_DIGITAL_FRAMING_ISO15693_T5T:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
- trf->iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
+ iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
break;
case NFC_DIGITAL_FRAMING_NFCA_T2T:
trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
- trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+ iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
break;
default:
dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
@@ -925,24 +925,46 @@ static int trf7970a_config_framing(struct trf7970a *trf, int framing)
trf->framing = framing;
- return trf7970a_write(trf, TRF7970A_ISO_CTRL, trf->iso_ctrl);
+ if (iso_ctrl != trf->iso_ctrl) {
+ ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
+ if (ret)
+ return ret;
+
+ trf->iso_ctrl = iso_ctrl;
+
+ ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
+ trf->modulator_sys_clk_ctrl);
+ if (ret)
+ return ret;
+ }
+
+ if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) {
+ ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
+ trf->chip_status_ctrl |
+ TRF7970A_CHIP_STATUS_RF_ON);
+ if (ret)
+ return ret;
+
+ trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON;
+
+ usleep_range(5000, 6000);
+ }
+
+ return 0;
}
static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
int param)
{
struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
- int ret = 0;
+ int ret;
dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
mutex_lock(&trf->lock);
- if (trf->state == TRF7970A_ST_OFF) {
- ret = trf7970a_switch_rf_on(trf);
- if (ret)
- goto err_out;
- }
+ if (trf->state == TRF7970A_ST_OFF)
+ trf7970a_switch_rf_on(trf);
switch (type) {
case NFC_DIGITAL_CONFIG_RF_TECH:
@@ -956,7 +978,6 @@ static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
ret = -EINVAL;
}
-err_out:
mutex_unlock(&trf->lock);
return ret;
}
@@ -1191,7 +1212,18 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
dev_dbg(trf->dev, "Abort process initiated\n");
mutex_lock(&trf->lock);
- trf->aborting = true;
+
+ switch (trf->state) {
+ case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+ case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+ trf->aborting = true;
+ break;
+ default:
+ break;
+ }
+
mutex_unlock(&trf->lock);
}
@@ -1206,12 +1238,25 @@ static struct nfc_digital_ops trf7970a_nfc_ops = {
.abort_cmd = trf7970a_abort_cmd,
};
+static int trf7970a_get_autosuspend_delay(struct device_node *np)
+{
+ int autosuspend_delay, ret;
+
+ ret = of_property_read_u32(np, "autosuspend-delay", &autosuspend_delay);
+ if (ret)
+ autosuspend_delay = TRF7970A_AUTOSUSPEND_DELAY;
+
+ of_node_put(np);
+
+ return autosuspend_delay;
+}
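+
+/*
+ * Example device tree fragment (a sketch; the node name is arbitrary
+ * and other required properties of the node are omitted):
+ *
+ *	nfc@0 {
+ *		compatible = "ti,trf7970a";
+ *		autosuspend-delay = <30000>;	(milliseconds)
+ *	};
+ */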
+
static int trf7970a_probe(struct spi_device *spi)
{
struct device_node *np = spi->dev.of_node;
const struct spi_device_id *id = spi_get_device_id(spi);
struct trf7970a *trf;
- int ret;
+ int uvolts, autosuspend_delay, ret;
if (!np) {
dev_err(&spi->dev, "No Device Tree entry\n");
@@ -1281,7 +1326,10 @@ static int trf7970a_probe(struct spi_device *spi)
goto err_destroy_lock;
}
- trf->powering_up = true;
+ uvolts = regulator_get_voltage(trf->regulator);
+
+ if (uvolts > 4000000)
+ trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
TRF7970A_SUPPORTED_PROTOCOLS,
@@ -1297,6 +1345,12 @@ static int trf7970a_probe(struct spi_device *spi)
nfc_digital_set_drvdata(trf->ddev, trf);
spi_set_drvdata(spi, trf);
+ autosuspend_delay = trf7970a_get_autosuspend_delay(np);
+
+ pm_runtime_set_autosuspend_delay(trf->dev, autosuspend_delay);
+ pm_runtime_use_autosuspend(trf->dev);
+ pm_runtime_enable(trf->dev);
+
ret = nfc_digital_register_device(trf->ddev);
if (ret) {
dev_err(trf->dev, "Can't register NFC digital device: %d\n",
@@ -1307,6 +1361,7 @@ static int trf7970a_probe(struct spi_device *spi)
return 0;
err_free_ddev:
+ pm_runtime_disable(trf->dev);
nfc_digital_free_device(trf->ddev);
err_disable_regulator:
regulator_disable(trf->regulator);
@@ -1321,15 +1376,16 @@ static int trf7970a_remove(struct spi_device *spi)
mutex_lock(&trf->lock);
- trf7970a_switch_rf_off(trf);
- trf7970a_init(trf);
-
switch (trf->state) {
case TRF7970A_ST_WAIT_FOR_TX_FIFO:
case TRF7970A_ST_WAIT_FOR_RX_DATA:
case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
trf7970a_send_err_upstream(trf, -ECANCELED);
+ /* FALLTHROUGH */
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ pm_runtime_put_sync(trf->dev);
break;
default:
break;
@@ -1337,6 +1393,8 @@ static int trf7970a_remove(struct spi_device *spi)
mutex_unlock(&trf->lock);
+ pm_runtime_disable(trf->dev);
+
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
@@ -1347,6 +1405,70 @@ static int trf7970a_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
+static int trf7970a_pm_runtime_suspend(struct device *dev)
+{
+ struct spi_device *spi = container_of(dev, struct spi_device, dev);
+ struct trf7970a *trf = spi_get_drvdata(spi);
+ int ret;
+
+ dev_dbg(dev, "Runtime suspend\n");
+
+ if (trf->state != TRF7970A_ST_OFF) {
+ dev_dbg(dev, "Can't suspend - not in OFF state (%d)\n",
+ trf->state);
+ return -EBUSY;
+ }
+
+ gpio_set_value(trf->en_gpio, 0);
+ gpio_set_value(trf->en2_gpio, 0);
+
+ ret = regulator_disable(trf->regulator);
+ if (ret)
+ dev_err(dev, "%s - Can't disable VIN: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int trf7970a_pm_runtime_resume(struct device *dev)
+{
+ struct spi_device *spi = container_of(dev, struct spi_device, dev);
+ struct trf7970a *trf = spi_get_drvdata(spi);
+ int ret;
+
+ dev_dbg(dev, "Runtime resume\n");
+
+ ret = regulator_enable(trf->regulator);
+ if (ret) {
+ dev_err(dev, "%s - Can't enable VIN: %d\n", __func__, ret);
+ return ret;
+ }
+
+ usleep_range(5000, 6000);
+
+ gpio_set_value(trf->en2_gpio, 1);
+ usleep_range(1000, 2000);
+ gpio_set_value(trf->en_gpio, 1);
+
+ usleep_range(20000, 21000);
+
+ ret = trf7970a_init(trf);
+ if (ret) {
+ dev_err(dev, "%s - Can't initialize: %d\n", __func__, ret);
+ return ret;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops trf7970a_pm_ops = {
+ SET_RUNTIME_PM_OPS(trf7970a_pm_runtime_suspend,
+ trf7970a_pm_runtime_resume, NULL)
+};
+
static const struct spi_device_id trf7970a_id_table[] = {
{ "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA },
{ }
@@ -1360,6 +1482,7 @@ static struct spi_driver trf7970a_spi_driver = {
.driver = {
.name = "trf7970a",
.owner = THIS_MODULE,
+ .pm = &trf7970a_pm_ops,
},
};
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 8368d96ae7b4..b9864806e9b8 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -227,7 +227,8 @@ static int __of_node_add(struct device_node *np)
np->kobj.kset = of_kset;
if (!np->parent) {
/* Nodes without parents are new top level trees */
- rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));
+ rc = kobject_add(&np->kobj, NULL, "%s",
+ safe_name(&of_kset->kobj, "base"));
} else {
name = safe_name(&np->parent->kobj, kbasename(np->full_name));
if (!name || !name[0])
@@ -1960,9 +1961,9 @@ int of_attach_node(struct device_node *np)
raw_spin_lock_irqsave(&devtree_lock, flags);
np->sibling = np->parent->child;
- np->allnext = of_allnodes;
+ np->allnext = np->parent->allnext;
+ np->parent->allnext = np;
np->parent->child = np;
- of_allnodes = np;
of_node_clear_flag(np, OF_DETACHED);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c4cddf0cd96d..b777d8f46bd5 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -880,6 +880,21 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
const u64 phys_offset = __pa(PAGE_OFFSET);
base &= PAGE_MASK;
size &= PAGE_MASK;
+
+ if (sizeof(phys_addr_t) < sizeof(u64)) {
+ if (base > ULONG_MAX) {
+ pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
+
+ if (base + size > ULONG_MAX) {
+ pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
+ ULONG_MAX, base + size);
+ size = ULONG_MAX - base;
+ }
+ }
+
if (base + size < phys_offset) {
pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 9a95831bd065..a3bf2122a8d5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/err.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -22,27 +23,6 @@
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_LICENSE("GPL");
-static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
-{
- /* The default values for phydev->supported are provided by the PHY
- * driver "features" member, we want to reset to sane defaults fist
- * before supporting higher speeds.
- */
- phydev->supported &= PHY_DEFAULT_FEATURES;
-
- switch (max_speed) {
- default:
- return;
-
- case SPEED_1000:
- phydev->supported |= PHY_1000BT_FEATURES;
- case SPEED_100:
- phydev->supported |= PHY_100BT_FEATURES;
- case SPEED_10:
- phydev->supported |= PHY_10BT_FEATURES;
- }
-}
-
/* Extract the clause 22 phy ID from the compatible string of the form
* ethernet-phy-idAAAA.BBBB */
static int of_get_phy_id(struct device_node *device, u32 *phy_id)
@@ -66,7 +46,6 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
struct phy_device *phy;
bool is_c45;
int rc;
- u32 max_speed = 0;
u32 phy_id;
is_c45 = of_device_is_compatible(child,
@@ -103,17 +82,33 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
return 1;
}
- /* Set phydev->supported based on the "max-speed" property
- * if present */
- if (!of_property_read_u32(child, "max-speed", &max_speed))
- of_set_phy_supported(phy, max_speed);
-
dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
child->name, addr);
return 0;
}
+static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
+{
+ u32 addr;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &addr);
+ if (ret < 0) {
+ dev_err(dev, "%s has invalid PHY address\n", np->full_name);
+ return ret;
+ }
+
+ /* A PHY must have a reg property in the range [0-31] */
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "%s PHY address %i is too large\n",
+ np->full_name, addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
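+
+/*
+ * A PHY child node that parses cleanly here looks like (a sketch,
+ * address value illustrative):
+ *
+ *	ethernet-phy@1 {
+ *		reg = <1>;	(MDIO bus address, 0-31)
+ *	};
+ */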
+
/**
* of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -126,9 +121,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
struct device_node *child;
const __be32 *paddr;
- u32 addr;
bool scanphys = false;
- int rc, i, len;
+ int addr, rc, i;
/* Mask out all PHYs from auto probing. Instead the PHYs listed in
* the device tree are populated after the bus has been registered */
@@ -148,19 +142,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* Loop over the child nodes and register a phy_device for each one */
for_each_available_child_of_node(np, child) {
- /* A PHY must have a reg property in the range [0-31] */
- paddr = of_get_property(child, "reg", &len);
- if (!paddr || len < sizeof(*paddr)) {
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0) {
scanphys = true;
- dev_err(&mdio->dev, "%s has invalid PHY address\n",
- child->full_name);
- continue;
- }
-
- addr = be32_to_cpup(paddr);
- if (addr >= PHY_MAX_ADDR) {
- dev_err(&mdio->dev, "%s PHY address %i is too large\n",
- child->full_name, addr);
continue;
}
@@ -175,7 +159,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
/* Skip PHYs with reg property set */
- paddr = of_get_property(child, "reg", &len);
+ paddr = of_get_property(child, "reg", NULL);
if (paddr)
continue;
@@ -198,6 +182,40 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
EXPORT_SYMBOL(of_mdiobus_register);
+/**
+ * of_mdiobus_link_phydev - Find a device node for a phy
+ * @mdio: pointer to mii_bus structure
+ * @phydev: phydev for which the of_node pointer should be set
+ *
+ * Walk the list of subnodes of a mdio bus and look for a node that matches the
+ * phy's address with its 'reg' property. If found, set the of_node pointer for
+ * the phy. This allows auto-probed phy devices to be supplied with information
+ * passed in via DT.
+ */
+void of_mdiobus_link_phydev(struct mii_bus *mdio,
+ struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *child;
+
+ if (dev->of_node || !mdio->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(mdio->dev.of_node, child) {
+ int addr;
+
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0)
+ continue;
+
+ if (addr == phydev->addr) {
+ dev->of_node = child;
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL(of_mdiobus_link_phydev);
+
/* Helper function for of_phy_find_device */
static int of_phy_match(struct device *dev, void *phy_np)
{
@@ -245,44 +263,6 @@ struct phy_device *of_phy_connect(struct net_device *dev,
EXPORT_SYMBOL(of_phy_connect);
/**
- * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy
- * @dev: pointer to net_device claiming the phy
- * @hndlr: Link state callback for the network device
- * @iface: PHY data interface type
- *
- * This function is a temporary stop-gap and will be removed soon. It is
- * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers. Do
- * not call this function from new drivers.
- */
-struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
- void (*hndlr)(struct net_device *),
- phy_interface_t iface)
-{
- struct device_node *net_np;
- char bus_id[MII_BUS_ID_SIZE + 3];
- struct phy_device *phy;
- const __be32 *phy_id;
- int sz;
-
- if (!dev->dev.parent)
- return NULL;
-
- net_np = dev->dev.parent->of_node;
- if (!net_np)
- return NULL;
-
- phy_id = of_get_property(net_np, "fixed-link", &sz);
- if (!phy_id || sz < sizeof(*phy_id))
- return NULL;
-
- sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
-
- phy = phy_connect(dev, bus_id, hndlr, iface);
- return IS_ERR(phy) ? NULL : phy;
-}
-EXPORT_SYMBOL(of_phy_connect_fixed_link);
-
-/**
* of_phy_attach - Attach to a PHY without starting the state machine
* @dev: pointer to net_device claiming the phy
* @phy_np: Node pointer for the PHY
@@ -301,3 +281,71 @@ struct phy_device *of_phy_attach(struct net_device *dev,
return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_attach);
+
+#if defined(CONFIG_FIXED_PHY)
+/*
+ * of_phy_is_fixed_link() and of_phy_register_fixed_link() must
+ * support two DT bindings:
+ * - the old DT binding, where 'fixed-link' was a property with 5
+ * cells encoding various information about the fixed PHY
+ * - the new DT binding, where 'fixed-link' is a sub-node of the
+ * Ethernet device.
+ */
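+/*
+ * For illustration (values are examples only):
+ *
+ * new:	fixed-link {
+ *		speed = <1000>;
+ *		full-duplex;
+ *	};
+ *
+ * old:	fixed-link = <0 1 1000 0 0>;
+ *	(emulated PHY id, duplex, speed, pause, asym-pause)
+ */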
+bool of_phy_is_fixed_link(struct device_node *np)
+{
+ struct device_node *dn;
+ int len;
+
+ /* New binding */
+ dn = of_get_child_by_name(np, "fixed-link");
+ if (dn) {
+ of_node_put(dn);
+ return true;
+ }
+
+ /* Old binding */
+ if (of_get_property(np, "fixed-link", &len) &&
+ len == (5 * sizeof(__be32)))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(of_phy_is_fixed_link);
+
+int of_phy_register_fixed_link(struct device_node *np)
+{
+ struct fixed_phy_status status = {};
+ struct device_node *fixed_link_node;
+ const __be32 *fixed_link_prop;
+ int len;
+
+ /* New binding */
+ fixed_link_node = of_get_child_by_name(np, "fixed-link");
+ if (fixed_link_node) {
+ status.link = 1;
+ status.duplex = of_property_read_bool(fixed_link_node,
+ "full-duplex");
+ if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+ return -EINVAL;
+ status.pause = of_property_read_bool(fixed_link_node, "pause");
+ status.asym_pause = of_property_read_bool(fixed_link_node,
+ "asym-pause");
+ of_node_put(fixed_link_node);
+ return fixed_phy_register(PHY_POLL, &status, np);
+ }
+
+ /* Old binding */
+ fixed_link_prop = of_get_property(np, "fixed-link", &len);
+ if (fixed_link_prop && len == (5 * sizeof(__be32))) {
+ status.link = 1;
+ status.duplex = be32_to_cpu(fixed_link_prop[1]);
+ status.speed = be32_to_cpu(fixed_link_prop[2]);
+ status.pause = be32_to_cpu(fixed_link_prop[3]);
+ status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
+ return fixed_phy_register(PHY_POLL, &status, np);
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(of_phy_register_fixed_link);
+#endif
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6c48d73a7fd7..500436f9be7f 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -166,10 +166,6 @@ static void of_dma_configure(struct platform_device *pdev)
int ret;
struct device *dev = &pdev->dev;
-#if defined(CONFIG_MICROBLAZE)
- pdev->archdata.dma_mask = 0xffffffffUL;
-#endif
-
/*
* Set default dma-mask to 32 bit. Drivers are expected to setup
* the correct supported dma_mask.
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 8c148f39e8d7..d292d7cb3417 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -231,10 +231,7 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev)
}
if (time_after(jiffies, timeout)) {
- dev_printk(KERN_DEBUG, &dev->dev,
- "vpd r/w failed. This is likely a firmware "
- "bug on this device. Contact the card "
- "vendor for a firmware update.");
+ dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
return -ETIMEDOUT;
}
if (fatal_signal_pending(current))
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 447d393725e1..73aef51a28f0 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -226,6 +226,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
type_mask, alignf, alignf_data,
&pci_32_bit);
}
+EXPORT_SYMBOL(pci_bus_alloc_resource);
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
@@ -253,6 +254,7 @@ void pci_bus_add_device(struct pci_dev *dev)
dev->is_added = 1;
}
+EXPORT_SYMBOL_GPL(pci_bus_add_device);
/**
* pci_bus_add_devices - start driver for PCI devices
@@ -279,6 +281,7 @@ void pci_bus_add_devices(const struct pci_bus *bus)
pci_bus_add_devices(child);
}
}
+EXPORT_SYMBOL(pci_bus_add_devices);
/** pci_walk_bus - walk devices on/under bus, calling callback.
* @top bus whose devices should be walked
@@ -344,6 +347,3 @@ void pci_bus_put(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_put);
-EXPORT_SYMBOL(pci_bus_alloc_resource);
-EXPORT_SYMBOL_GPL(pci_bus_add_device);
-EXPORT_SYMBOL(pci_bus_add_devices);
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 1632661c5b7f..c5d0ca384502 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -545,7 +545,6 @@ static int __init add_pcie_port(struct pcie_port *pp,
pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
- spin_lock_init(&pp->conf_lock);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index a5645ae4aef0..a568efaa331c 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -507,7 +507,6 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
pp->root_bus_nr = -1;
pp->ops = &imx6_pcie_host_ops;
- spin_lock_init(&pp->conf_lock);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index e384e2534594..ce23e0f076b6 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -113,7 +113,6 @@ struct mvebu_pcie {
struct mvebu_pcie_port {
char *name;
void __iomem *base;
- spinlock_t conf_lock;
u32 port;
u32 lane;
int devfn;
@@ -329,9 +328,11 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
sz, remap);
if (ret) {
+ phys_addr_t end = base + sz - 1;
+
dev_err(&port->pcie->pdev->dev,
- "Could not create MBus window at 0x%x, size 0x%x: %d\n",
- base, sz, ret);
+ "Could not create MBus window at [mem %pa-%pa]: %d\n",
+ &base, &end, ret);
mvebu_pcie_del_windows(port, base - size_mapped,
size_mapped);
return;
@@ -613,9 +614,9 @@ static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
return sys->private_data;
}
-static struct mvebu_pcie_port *
-mvebu_pcie_find_port(struct mvebu_pcie *pcie, struct pci_bus *bus,
- int devfn)
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+ struct pci_bus *bus,
+ int devfn)
{
int i;
@@ -638,7 +639,6 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
{
struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
struct mvebu_pcie_port *port;
- unsigned long flags;
int ret;
port = mvebu_pcie_find_port(pcie, bus, devfn);
@@ -664,10 +664,8 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/* Access the real PCIe interface */
- spin_lock_irqsave(&port->conf_lock, flags);
ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
where, size, val);
- spin_unlock_irqrestore(&port->conf_lock, flags);
return ret;
}
@@ -678,7 +676,6 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
struct mvebu_pcie_port *port;
- unsigned long flags;
int ret;
port = mvebu_pcie_find_port(pcie, bus, devfn);
@@ -710,10 +707,8 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
}
/* Access the real PCIe interface */
- spin_lock_irqsave(&port->conf_lock, flags);
ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
where, size, val);
- spin_unlock_irqrestore(&port->conf_lock, flags);
return ret;
}
@@ -786,10 +781,10 @@ static void mvebu_pcie_add_bus(struct pci_bus *bus)
}
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
- const struct resource *res,
- resource_size_t start,
- resource_size_t size,
- resource_size_t align)
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align)
{
if (dev->bus->number != 0)
return start;
@@ -839,7 +834,8 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
* found, maps it.
*/
static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
- struct device_node *np, struct mvebu_pcie_port *port)
+ struct device_node *np,
+ struct mvebu_pcie_port *port)
{
struct resource regs;
int ret = 0;
@@ -1060,7 +1056,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
mvebu_pcie_set_local_dev_nr(port, 1);
port->dn = child;
- spin_lock_init(&port->conf_lock);
mvebu_sw_pci_bridge_init(port);
i++;
}
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index e3bf9e6d5d9a..1eaf4df3618a 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -643,7 +643,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
- unsigned long flags;
int ret;
if (!pp) {
@@ -656,13 +655,11 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
return PCIBIOS_DEVICE_NOT_FOUND;
}
- spin_lock_irqsave(&pp->conf_lock, flags);
if (bus->number != pp->root_bus_nr)
ret = dw_pcie_rd_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_rd_own_conf(pp, where, size, val);
- spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
@@ -671,7 +668,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
- unsigned long flags;
int ret;
if (!pp) {
@@ -682,13 +678,11 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pp->conf_lock, flags);
if (bus->number != pp->root_bus_nr)
ret = dw_pcie_wr_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_wr_own_conf(pp, where, size, val);
- spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index a169d22d517e..77f592faa7bf 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -41,7 +41,6 @@ struct pcie_port {
void __iomem *va_cfg1_base;
u64 io_base;
u64 mem_base;
- spinlock_t conf_lock;
struct resource cfg;
struct resource io;
struct resource mem;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 8e06124aa80f..f7d3de32c9a0 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -277,9 +277,8 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
else if (size == 2)
*val = (*val >> (8 * (where & 2))) & 0xffff;
- dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x "
- "where=0x%04x size=%d val=0x%08lx\n", bus->number,
- devfn, where, size, (unsigned long)*val);
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
+ bus->number, devfn, where, size, (unsigned long)*val);
return ret;
}
@@ -302,9 +301,8 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
- dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x "
- "where=0x%04x size=%d val=0x%08lx\n", bus->number,
- devfn, where, size, (unsigned long)val);
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
+ bus->number, devfn, where, size, (unsigned long)val);
if (size == 1) {
shift = 8 * (where & 3);
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 2b859249303b..b0e61bf261a7 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -142,6 +142,16 @@ static inline acpi_handle func_to_handle(struct acpiphp_func *func)
return func_to_acpi_device(func)->handle;
}
+struct acpiphp_root_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_bridge *root_bridge;
+};
+
+static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_root_context, hp);
+}
+
/*
* struct acpiphp_attention_info - device specific attention registration
*
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 728c31f4c2c5..e291efcd02a2 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -63,10 +63,6 @@ MODULE_LICENSE("GPL");
MODULE_PARM_DESC(disable, "disable acpiphp driver");
module_param_named(disable, acpiphp_disabled, bool, 0444);
-/* export the attention callback registration methods */
-EXPORT_SYMBOL_GPL(acpiphp_register_attention);
-EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
-
static int enable_slot (struct hotplug_slot *slot);
static int disable_slot (struct hotplug_slot *slot);
static int set_attention_status (struct hotplug_slot *slot, u8 value);
@@ -104,6 +100,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
}
return retval;
}
+EXPORT_SYMBOL_GPL(acpiphp_register_attention);
/**
@@ -124,6 +121,7 @@ int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
}
return retval;
}
+EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
/**
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 75e178330215..602d153c7055 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -351,11 +351,9 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
slot->slot = NULL;
bridge->nr_slots--;
if (retval == -EBUSY)
- pr_warn("Slot %llu already registered by another "
- "hotplug driver\n", sun);
+ pr_warn("Slot %llu already registered by another hotplug driver\n", sun);
else
- pr_warn("acpiphp_register_hotplug_slot failed "
- "(err code = 0x%x)\n", retval);
+ pr_warn("acpiphp_register_hotplug_slot failed (err code = 0x%x)\n", retval);
}
/* Even if the slot registration fails, we can still use it. */
}
@@ -373,17 +371,13 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
{
- struct acpiphp_context *context;
struct acpiphp_bridge *bridge = NULL;
acpi_lock_hp_context();
- context = acpiphp_get_context(adev);
- if (context) {
- bridge = context->bridge;
+ if (adev->hp) {
+ bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
if (bridge)
get_bridge(bridge);
-
- acpiphp_put_context(context);
}
acpi_unlock_hp_context();
return bridge;
@@ -881,7 +875,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
*/
get_device(&bus->dev);
- if (!pci_is_root_bus(bridge->pci_bus)) {
+ acpi_lock_hp_context();
+ if (pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_root_context *root_context;
+
+ root_context = kzalloc(sizeof(*root_context), GFP_KERNEL);
+ if (!root_context)
+ goto err;
+
+ root_context->root_bridge = bridge;
+ acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL);
+ } else {
struct acpiphp_context *context;
/*
@@ -890,21 +894,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
* parent is going to be handled by pciehp, in which case this
* bridge is not interesting to us either.
*/
- acpi_lock_hp_context();
context = acpiphp_get_context(adev);
- if (!context) {
- acpi_unlock_hp_context();
- put_device(&bus->dev);
- pci_dev_put(bridge->pci_dev);
- kfree(bridge);
- return;
- }
+ if (!context)
+ goto err;
+
bridge->context = context;
context->bridge = bridge;
/* Get a reference to the parent bridge. */
get_bridge(context->func.parent);
- acpi_unlock_hp_context();
}
+ acpi_unlock_hp_context();
/* Must be added to the list prior to calling acpiphp_add_context(). */
mutex_lock(&bridge_mutex);
@@ -919,6 +918,30 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
cleanup_bridge(bridge);
put_bridge(bridge);
}
+ return;
+
+ err:
+ acpi_unlock_hp_context();
+ put_device(&bus->dev);
+ pci_dev_put(bridge->pci_dev);
+ kfree(bridge);
+}
+
+void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
+{
+ if (pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_root_context *root_context;
+ struct acpi_device *adev;
+
+ acpi_lock_hp_context();
+ adev = ACPI_COMPANION(bridge->pci_bus->bridge);
+ root_context = to_acpiphp_root_context(adev->hp);
+ adev->hp = NULL;
+ acpi_unlock_hp_context();
+ kfree(root_context);
+ }
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
}
/**
@@ -936,8 +959,7 @@ void acpiphp_remove_slots(struct pci_bus *bus)
list_for_each_entry(bridge, &bridge_list, list)
if (bridge->pci_bus == bus) {
mutex_unlock(&bridge_mutex);
- cleanup_bridge(bridge);
- put_bridge(bridge);
+ acpiphp_drop_bridge(bridge);
return;
}
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 1356211431d0..6a0ddf757349 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -56,9 +56,9 @@ struct cpci_hp_controller_ops {
int (*enable_irq) (void);
int (*disable_irq) (void);
int (*check_irq) (void *dev_id);
- int (*hardware_test) (struct slot* slot, u32 value);
- u8 (*get_power) (struct slot* slot);
- int (*set_power) (struct slot* slot, int value);
+ int (*hardware_test) (struct slot *slot, u32 value);
+ u8 (*get_power) (struct slot *slot);
+ int (*set_power) (struct slot *slot, int value);
};
struct cpci_hp_controller {
@@ -89,13 +89,13 @@ int cpci_hp_stop(void);
u8 cpci_get_attention_status(struct slot *slot);
u8 cpci_get_latch_status(struct slot *slot);
u8 cpci_get_adapter_status(struct slot *slot);
-u16 cpci_get_hs_csr(struct slot * slot);
+u16 cpci_get_hs_csr(struct slot *slot);
int cpci_set_attention_status(struct slot *slot, int status);
-int cpci_check_and_clear_ins(struct slot * slot);
-int cpci_check_ext(struct slot * slot);
-int cpci_clear_ext(struct slot * slot);
-int cpci_led_on(struct slot * slot);
-int cpci_led_off(struct slot * slot);
+int cpci_check_and_clear_ins(struct slot *slot);
+int cpci_check_ext(struct slot *slot);
+int cpci_clear_ext(struct slot *slot);
+int cpci_led_on(struct slot *slot);
+int cpci_led_off(struct slot *slot);
int cpci_configure_slot(struct slot *slot);
int cpci_unconfigure_slot(struct slot *slot);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 00c81a3cefc9..e09cf7827d68 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -65,10 +65,10 @@ static int thread_finished;
static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int get_power_status(struct hotplug_slot *slot, u8 * value);
-static int get_attention_status(struct hotplug_slot *slot, u8 * value);
-static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
-static int get_latch_status(struct hotplug_slot *slot, u8 * value);
+static int get_power_status(struct hotplug_slot *slot, u8 *value);
+static int get_attention_status(struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
+static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
.enable_slot = enable_slot,
@@ -168,7 +168,7 @@ cpci_get_power_status(struct slot *slot)
}
static int
-get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
@@ -177,7 +177,7 @@ get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
}
static int
-get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
@@ -192,14 +192,14 @@ set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
}
static int
-get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
*value = hotplug_slot->info->adapter_status;
return 0;
}
static int
-get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value)
+get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
*value = hotplug_slot->info->latch_status;
return 0;
@@ -299,6 +299,7 @@ error_slot:
error:
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_register_bus);
int
cpci_hp_unregister_bus(struct pci_bus *bus)
@@ -329,6 +330,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
up_write(&list_rwsem);
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus);
/* This is the interrupt mode interrupt handler */
static irqreturn_t
@@ -360,7 +362,7 @@ static int
init_slots(int clear_ins)
{
struct slot *slot;
- struct pci_dev* dev;
+ struct pci_dev *dev;
dbg("%s - enter", __func__);
down_read(&list_rwsem);
@@ -614,6 +616,7 @@ cpci_hp_register_controller(struct cpci_hp_controller *new_controller)
controller = new_controller;
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_register_controller);
static void
cleanup_slots(void)
@@ -653,6 +656,7 @@ cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller)
status = -ENODEV;
return status;
}
+EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller);
int
cpci_hp_start(void)
@@ -690,6 +694,7 @@ cpci_hp_start(void)
dbg("%s - exit", __func__);
return 0;
}
+EXPORT_SYMBOL_GPL(cpci_hp_start);
int
cpci_hp_stop(void)
@@ -704,6 +709,7 @@ cpci_hp_stop(void)
cpci_stop_thread();
return 0;
}
+EXPORT_SYMBOL_GPL(cpci_hp_stop);
int __init
cpci_hotplug_init(int debug)
@@ -721,10 +727,3 @@ cpci_hotplug_exit(void)
cpci_hp_stop();
cpci_hp_unregister_controller(controller);
}
-
-EXPORT_SYMBOL_GPL(cpci_hp_register_controller);
-EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller);
-EXPORT_SYMBOL_GPL(cpci_hp_register_bus);
-EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus);
-EXPORT_SYMBOL_GPL(cpci_hp_start);
-EXPORT_SYMBOL_GPL(cpci_hp_stop);
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index f6ef64c2ccb5..7d48ecae6695 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -46,7 +46,7 @@ extern int cpci_debug;
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
-u8 cpci_get_attention_status(struct slot* slot)
+u8 cpci_get_attention_status(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -66,7 +66,7 @@ u8 cpci_get_attention_status(struct slot* slot)
return hs_csr & 0x0008 ? 1 : 0;
}
-int cpci_set_attention_status(struct slot* slot, int status)
+int cpci_set_attention_status(struct slot *slot, int status)
{
int hs_cap;
u16 hs_csr;
@@ -93,7 +93,7 @@ int cpci_set_attention_status(struct slot* slot, int status)
return 1;
}
-u16 cpci_get_hs_csr(struct slot* slot)
+u16 cpci_get_hs_csr(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -111,7 +111,7 @@ u16 cpci_get_hs_csr(struct slot* slot)
return hs_csr;
}
-int cpci_check_and_clear_ins(struct slot* slot)
+int cpci_check_and_clear_ins(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -140,7 +140,7 @@ int cpci_check_and_clear_ins(struct slot* slot)
return ins;
}
-int cpci_check_ext(struct slot* slot)
+int cpci_check_ext(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -161,7 +161,7 @@ int cpci_check_ext(struct slot* slot)
return ext;
}
-int cpci_clear_ext(struct slot* slot)
+int cpci_clear_ext(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -187,7 +187,7 @@ int cpci_clear_ext(struct slot* slot)
return 0;
}
-int cpci_led_on(struct slot* slot)
+int cpci_led_on(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -216,7 +216,7 @@ int cpci_led_on(struct slot* slot)
return 0;
}
-int cpci_led_off(struct slot* slot)
+int cpci_led_off(struct slot *slot)
{
int hs_cap;
u16 hs_csr;
@@ -303,7 +303,7 @@ int cpci_configure_slot(struct slot *slot)
return ret;
}
-int cpci_unconfigure_slot(struct slot* slot)
+int cpci_unconfigure_slot(struct slot *slot)
{
struct pci_dev *dev, *temp;
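Every change in this file is the same mechanical fix: kernel style binds the * to the variable name rather than to the type. A self-contained sketch of the rule, with a hypothetical struct for concreteness:

/* Hypothetical type, only so the declarations below are concrete. */
struct slot { int number; };

struct slot *a;		/* kernel style: '*' binds to the name */
struct slot* b;		/* discouraged: reads as if the type were 'struct slot*' */
struct slot * c;	/* discouraged: checkpatch warns about the stray space */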
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index 7536eef620b0..04fcd7811400 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -78,8 +78,8 @@ static struct cpci_hp_controller generic_hpc;
static int __init validate_parameters(void)
{
- char* str;
- char* p;
+ char *str;
+ char *p;
unsigned long tmp;
if(!bridge) {
@@ -142,8 +142,8 @@ static int query_enum(void)
static int __init cpcihp_generic_init(void)
{
int status;
- struct resource* r;
- struct pci_dev* dev;
+ struct resource *r;
+ struct pci_dev *dev;
info(DRIVER_DESC " version: " DRIVER_VERSION);
status = validate_parameters();
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index e8c4a7ccf578..6757b3ef7e10 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -295,7 +295,7 @@ static struct pci_driver zt5550_hc_driver = {
static int __init zt5550_init(void)
{
- struct resource* r;
+ struct resource *r;
int rc;
info(DRIVER_DESC " version: " DRIVER_VERSION);
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 516b87738b6e..0450f405807d 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -255,7 +255,7 @@ struct pci_func {
struct pci_resource *io_head;
struct pci_resource *bus_head;
struct timer_list *p_task_event;
- struct pci_dev* pci_dev;
+ struct pci_dev *pci_dev;
};
struct slot {
@@ -278,7 +278,7 @@ struct slot {
};
struct pci_resource {
- struct pci_resource * next;
+ struct pci_resource *next;
u32 base;
u32 length;
};
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 037e2612c5bd..4aaee746df88 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -94,7 +94,7 @@ static inline int is_slot66mhz(struct slot *slot)
*
* Returns pointer to the head of the SMBIOS tables (or %NULL).
*/
-static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end)
+static void __iomem *detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end)
{
void __iomem *fp;
void __iomem *endp;
@@ -131,7 +131,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
*
* For unexpected switch opens
*/
-static int init_SERR(struct controller * ctrl)
+static int init_SERR(struct controller *ctrl)
{
u32 tempdword;
u32 number_of_slots;
@@ -291,7 +291,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot);
}
-static int ctrl_slot_cleanup (struct controller * ctrl)
+static int ctrl_slot_cleanup (struct controller *ctrl)
{
struct slot *old_slot, *next_slot;
@@ -706,8 +706,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
hotplug_slot_info->adapter_status =
get_presence_status(ctrl, slot);
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
+ dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n",
slot->bus, slot->device,
slot->number, ctrl->slot_device_offset,
slot_number);
@@ -837,8 +836,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus = pdev->subordinate;
if (!bus) {
- dev_notice(&pdev->dev, "the device is not a bridge, "
- "skipping\n");
+ dev_notice(&pdev->dev, "the device is not a bridge, skipping\n");
rc = -ENODEV;
goto err_disable_device;
}
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index f593585f2784..bde47fce3248 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -39,9 +39,9 @@
#include <linux/kthread.h>
#include "cpqphp.h"
-static u32 configure_new_device(struct controller* ctrl, struct pci_func *func,
+static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
u8 behind_bridge, struct resource_lists *resources);
-static int configure_new_function(struct controller* ctrl, struct pci_func *func,
+static int configure_new_function(struct controller *ctrl, struct pci_func *func,
u8 behind_bridge, struct resource_lists *resources);
static void interrupt_event_handler(struct controller *ctrl);
@@ -64,7 +64,7 @@ static void long_delay(int delay)
/* FIXME: The following line needs to be somewhere else... */
#define WRONG_BUS_FREQUENCY 0x07
-static u8 handle_switch_change(u8 change, struct controller * ctrl)
+static u8 handle_switch_change(u8 change, struct controller *ctrl)
{
int hp_slot;
u8 rc = 0;
@@ -138,7 +138,7 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
}
-static u8 handle_presence_change(u16 change, struct controller * ctrl)
+static u8 handle_presence_change(u16 change, struct controller *ctrl)
{
int hp_slot;
u8 rc = 0;
@@ -232,7 +232,7 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
}
-static u8 handle_power_fault(u8 change, struct controller * ctrl)
+static u8 handle_power_fault(u8 change, struct controller *ctrl)
{
int hp_slot;
u8 rc = 0;
@@ -997,7 +997,7 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
*
* Returns %0 if successful, !0 otherwise.
*/
-static int slot_remove(struct pci_func * old_slot)
+static int slot_remove(struct pci_func *old_slot)
{
struct pci_func *next;
@@ -1109,7 +1109,7 @@ struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index)
/* DJZ: I don't think is_bridge will work as is.
* FIXME */
-static int is_bridge(struct pci_func * func)
+static int is_bridge(struct pci_func *func)
{
/* Check the header type */
if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01)
@@ -1625,7 +1625,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
* @replace_flag: whether replacing or adding a new device
* @ctrl: target controller
*/
-static u32 remove_board(struct pci_func * func, u32 replace_flag, struct controller * ctrl)
+static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controller *ctrl)
{
int index;
u8 skip = 0;
@@ -1742,7 +1742,7 @@ static void pushbutton_helper_thread(unsigned long data)
/* this is the main worker thread */
-static int event_thread(void* data)
+static int event_thread(void *data)
{
struct controller *ctrl;
@@ -1992,7 +1992,7 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
u16 temp_word;
u32 tempdword;
int rc;
- struct slot* p_slot;
+ struct slot *p_slot;
int physical_slot = 0;
tempdword = 0;
@@ -2088,7 +2088,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
u8 replace_flag;
u32 rc = 0;
unsigned int devfn;
- struct slot* p_slot;
+ struct slot *p_slot;
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
@@ -2270,8 +2270,8 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
*
* Returns 0 if success.
*/
-static u32 configure_new_device(struct controller * ctrl, struct pci_func * func,
- u8 behind_bridge, struct resource_lists * resources)
+static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
+ u8 behind_bridge, struct resource_lists *resources)
{
u8 temp_byte, function, max_functions, stop_it;
int rc;
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 9600a392eaae..0968a9bcb345 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -107,7 +107,7 @@ static spinlock_t int15_lock;
*/
-static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
+static u32 add_byte(u32 **p_buffer, u8 value, u32 *used, u32 *avail)
{
u8 **tByte;
@@ -122,7 +122,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
}
-static u32 add_dword( u32 **p_buffer, u32 value, u32 *used, u32 *avail)
+static u32 add_dword(u32 **p_buffer, u32 value, u32 *used, u32 *avail)
{
if ((*used + 4) > *avail)
return(1);
@@ -267,12 +267,12 @@ static u32 store_HRT (void __iomem *rom_start)
ctrl = cpqhp_ctrl_list;
/* The revision of this structure */
- rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
+ rc = add_byte(&pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
/* The number of controllers */
- rc = add_byte( &pFill, 1, &usedbytes, &available);
+ rc = add_byte(&pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -282,22 +282,22 @@ static u32 store_HRT (void __iomem *rom_start)
numCtrl++;
/* The bus number */
- rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
+ rc = add_byte(&pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
/* The device Number */
- rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
+ rc = add_byte(&pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
/* The function Number */
- rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
+ rc = add_byte(&pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
/* Skip the number of available entries */
- rc = add_dword( &pFill, 0, &usedbytes, &available);
+ rc = add_dword(&pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
@@ -311,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -336,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -361,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -386,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
loop ++;
/* base */
- rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
/* length */
- rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
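Besides dropping the space after the opening parenthesis, the add_byte()/add_dword() calls above all share one contract: append to a cursor only when the used/available accounting says the value fits. A minimal userspace reconstruction of that pattern, assuming the same return convention (0 on success, 1 on overflow); this is a sketch, not the driver's code:

#include <stdint.h>

static uint32_t add_byte(uint32_t **p_buffer, uint8_t value,
			 uint32_t *used, uint32_t *avail)
{
	uint8_t **t_byte;

	if ((*used + 1) > *avail)
		return 1;		/* would overflow the buffer */

	t_byte = (uint8_t **)p_buffer;	/* treat the cursor as a byte pointer */
	**t_byte = value;		/* store the byte */
	(*t_byte)++;			/* advance the caller's cursor */
	(*used)++;
	return 0;
}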
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index a3e3c2002b58..1c8c2f130d31 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -81,7 +81,7 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller *ctrl, struct pci_func *func)
{
struct pci_bus *child;
int num;
@@ -121,7 +121,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func *func)
{
int j;
@@ -129,7 +129,7 @@ int cpqhp_unconfigure_device(struct pci_func* func)
pci_lock_rescan_remove();
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ struct pci_dev *temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
if (temp) {
pci_dev_put(temp);
pci_stop_and_remove_bus_device(temp);
@@ -203,7 +203,7 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
}
-static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
+static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_num)
{
u16 tdevice;
u32 work;
@@ -280,7 +280,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
}
-int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
+int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot)
{
/* plain (bridges allowed) */
return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
@@ -465,7 +465,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
*
* returns 0 if success
*/
-int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
+int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func *new_slot)
{
long rc;
u8 class_code;
@@ -549,7 +549,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
*
* returns 0 if success
*/
-int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func)
{
u8 cloop;
u8 header_type;
@@ -686,7 +686,7 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
*
* returns 0 if success
*/
-int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
+int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func *func)
{
u8 cloop;
u8 header_type;
@@ -949,7 +949,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
*
* returns 0 if success
*/
-int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func)
{
int cloop;
u8 header_type;
@@ -1027,7 +1027,7 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
*
* returns 0 if the board is the same nonzero otherwise
*/
-int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func)
{
u8 cloop;
u8 header_type;
@@ -1419,7 +1419,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
*
* returns 0 if success
*/
-int cpqhp_return_board_resources(struct pci_func * func, struct resource_lists * resources)
+int cpqhp_return_board_resources(struct pci_func *func, struct resource_lists *resources)
{
int rc = 0;
struct pci_resource *node;
@@ -1475,7 +1475,7 @@ int cpqhp_return_board_resources(struct pci_func * func, struct resource_lists *
*
* Puts node back in the resource list pointed to by head
*/
-void cpqhp_destroy_resource_list (struct resource_lists * resources)
+void cpqhp_destroy_resource_list (struct resource_lists *resources)
{
struct pci_resource *res, *tres;
@@ -1522,7 +1522,7 @@ void cpqhp_destroy_resource_list (struct resource_lists * resources)
*
* Puts node back in the resource list pointed to by head
*/
-void cpqhp_destroy_board_resources (struct pci_func * func)
+void cpqhp_destroy_board_resources (struct pci_func *func)
{
struct pci_resource *res, *tres;
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 17c1f36315d1..4a392c44e3d3 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -79,7 +79,7 @@ static int show_ctrl (struct controller *ctrl, char *buf)
static int show_dev (struct controller *ctrl, char *buf)
{
- char * out = buf;
+ char *out = buf;
int index;
struct pci_resource *res;
struct pci_func *new_slot;
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index cf3ac1e4b099..f7b8684a7739 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -74,7 +74,7 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
static inline int get_cur_bus_info(struct slot **sl)
{
int rc = 1;
- struct slot * slot_cur = *sl;
+ struct slot *slot_cur = *sl;
debug("options = %x\n", slot_cur->ctrl->options);
debug("revision = %x\n", slot_cur->ctrl->revision);
@@ -114,8 +114,8 @@ static inline int slot_update(struct slot **sl)
static int __init get_max_slots (void)
{
- struct slot * slot_cur;
- struct list_head * tmp;
+ struct slot *slot_cur;
+ struct list_head *tmp;
u8 slot_count = 0;
list_for_each(tmp, &ibmphp_slot_head) {
@@ -280,7 +280,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
return rc;
}
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -311,7 +311,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
return rc;
}
-static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -338,7 +338,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value)
}
-static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -364,7 +364,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
return rc;
}
-static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -433,7 +433,7 @@ static int get_max_bus_speed(struct slot *slot)
}
/*
-static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 * value, u8 flag)
+static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 *value, u8 flag)
{
int rc = -ENODEV;
struct slot *pslot;
@@ -471,7 +471,7 @@ static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 * value
return rc;
}
-static int get_bus_name(struct hotplug_slot *hotplug_slot, char * value)
+static int get_bus_name(struct hotplug_slot *hotplug_slot, char *value)
{
int rc = -ENODEV;
struct slot *pslot = NULL;
@@ -671,7 +671,7 @@ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function)
{
struct pci_func *func_cur;
struct slot *slot_cur;
- struct list_head * tmp;
+ struct list_head *tmp;
list_for_each(tmp, &ibmphp_slot_head) {
slot_cur = list_entry(tmp, struct slot, ibm_slot_list);
if (slot_cur->func) {
@@ -696,8 +696,8 @@ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function)
static void free_slots(void)
{
struct slot *slot_cur;
- struct list_head * tmp;
- struct list_head * next;
+ struct list_head *tmp;
+ struct list_head *next;
debug("%s -- enter\n", __func__);
@@ -825,10 +825,10 @@ static int ibm_configure_device(struct pci_func *func)
/*******************************************************
* Returns whether the bus is empty or not
*******************************************************/
-static int is_bus_empty(struct slot * slot_cur)
+static int is_bus_empty(struct slot *slot_cur)
{
int rc;
- struct slot * tmp_slot;
+ struct slot *tmp_slot;
u8 i = slot_cur->bus_on->slot_min;
while (i <= slot_cur->bus_on->slot_max) {
@@ -856,7 +856,7 @@ static int is_bus_empty(struct slot * slot_cur)
* Parameters: slot
* Returns: bus is set (0) or error code
***********************************************************/
-static int set_bus(struct slot * slot_cur)
+static int set_bus(struct slot *slot_cur)
{
int rc;
u8 speed;
@@ -956,7 +956,7 @@ static int set_bus(struct slot * slot_cur)
static int check_limitations(struct slot *slot_cur)
{
u8 i;
- struct slot * tmp_slot;
+ struct slot *tmp_slot;
u8 count = 0;
u8 limitation = 0;
@@ -1045,8 +1045,7 @@ static int enable_slot(struct hotplug_slot *hs)
rc = check_limitations(slot_cur);
if (rc) {
err("Adding this card exceeds the limitations of this bus.\n");
- err("(i.e., >1 133MHz cards running on same bus, or "
- ">2 66 PCI cards running on same bus.\n");
+ err("(i.e., >1 133MHz cards running on same bus, or >2 66 PCI cards running on same bus.\n");
err("Try hot-adding into another bus\n");
rc = -EINVAL;
goto error_nopower;
@@ -1070,12 +1069,10 @@ static int enable_slot(struct hotplug_slot *hs)
!(SLOT_PWRGD(slot_cur->status)))
err("power fault occurred trying to power up\n");
else if (SLOT_BUS_SPEED(slot_cur->status)) {
- err("bus speed mismatch occurred. please check "
- "current bus speed and card capability\n");
+ err("bus speed mismatch occurred. please check current bus speed and card capability\n");
print_card_capability(slot_cur);
} else if (SLOT_BUS_MODE(slot_cur->ext_status)) {
- err("bus mode mismatch occurred. please check "
- "current bus mode and card capability\n");
+ err("bus mode mismatch occurred. please check current bus mode and card capability\n");
print_card_capability(slot_cur);
}
ibmphp_update_slot_info(slot_cur);
@@ -1098,8 +1095,7 @@ static int enable_slot(struct hotplug_slot *hs)
goto error_power;
}
if (SLOT_POWER(slot_cur->status) && (SLOT_BUS_SPEED(slot_cur->status))) {
- err("bus speed mismatch occurred. please check current bus "
- "speed and card capability\n");
+ err("bus speed mismatch occurred. please check current bus speed and card capability\n");
print_card_capability(slot_cur);
goto error_power;
}
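The message consolidations in this file follow the coding-style rule that user-visible strings are never split across source lines, even when the line runs past 80 columns, so that a message copied out of a log can be found with grep. A runnable sketch, with err() standing in for the driver's logging macro:

#include <stdio.h>

/* Stand-in for the driver's err() macro. */
#define err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

int main(void)
{
	/* Split string: grep for the whole message never matches the source. */
	err("bus speed mismatch occurred. please check "
	    "current bus speed and card capability\n");

	/* Kernel style: keep the user-visible string on one line. */
	err("bus speed mismatch occurred. please check current bus speed and card capability\n");
	return 0;
}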
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index bd044158b36c..0f65ac555434 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -563,7 +563,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
return rc;
}
-static struct opt_rio_lo * find_rxe_num (u8 slot_num)
+static struct opt_rio_lo *find_rxe_num (u8 slot_num)
{
struct opt_rio_lo *opt_lo_ptr;
@@ -575,7 +575,7 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num)
return NULL;
}
-static struct opt_rio * find_chassis_num (u8 slot_num)
+static struct opt_rio *find_chassis_num (u8 slot_num)
{
struct opt_rio *opt_vg_ptr;
@@ -593,7 +593,7 @@ static struct opt_rio * find_chassis_num (u8 slot_num)
static u8 calculate_first_slot (u8 slot_num)
{
u8 first_slot = 1;
- struct slot * slot_cur;
+ struct slot *slot_cur;
list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
if (slot_cur->ctrl) {
@@ -607,7 +607,7 @@ static u8 calculate_first_slot (u8 slot_num)
#define SLOT_NAME_SIZE 30
-static char *create_file_name (struct slot * slot_cur)
+static char *create_file_name (struct slot *slot_cur)
{
struct opt_rio *opt_vg_ptr = NULL;
struct opt_rio_lo *opt_lo_ptr = NULL;
@@ -1192,7 +1192,7 @@ int ibmphp_register_pci (void)
}
return rc;
}
-static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids)
+static int ibmphp_probe (struct pci_dev *dev, const struct pci_device_id *ids)
{
struct controller *ctrl;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 5fc7a089f532..a936022956e6 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -533,7 +533,7 @@ static u8 hpc_readcmdtoindex (u8 cmd, u8 index)
*
* Return 0 or error codes
*---------------------------------------------------------------------*/
-int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
+int ibmphp_hpc_readslot (struct slot *pslot, u8 cmd, u8 *pstatus)
{
void __iomem *wpg_bbar = NULL;
struct controller *ctlr_ptr;
@@ -672,7 +672,7 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus)
*
* Action: issue a WRITE command to HPC
*---------------------------------------------------------------------*/
-int ibmphp_hpc_writeslot (struct slot * pslot, u8 cmd)
+int ibmphp_hpc_writeslot (struct slot *pslot, u8 cmd)
{
void __iomem *wpg_bbar = NULL;
struct controller *ctlr_ptr;
@@ -1102,7 +1102,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void)
* Value:
*---------------------------------------------------------------------*/
static int hpc_wait_ctlr_notworking (int timeout, struct controller *ctlr_ptr, void __iomem *wpg_bbar,
- u8 * pstatus)
+ u8 *pstatus)
{
int rc = 0;
u8 done = 0;
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 639ea3a75e14..2fd296706ce7 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -47,7 +47,7 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno);
* We also assign the same irq numbers for multi function devices.
* These are PIC mode, so shouldn't matter n.e.ways (hopefully)
*/
-static void assign_alt_irq (struct pci_func * cur_func, u8 class_code)
+static void assign_alt_irq (struct pci_func *cur_func, u8 class_code)
{
int j;
for (j = 0; j < 4; j++) {
@@ -137,8 +137,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
"Please choose another device.\n", cur_func->device);
return -ENODEV;
} else if (class == PCI_CLASS_DISPLAY_VGA) {
- err ("The device %x is not supported for hot plugging. "
- "Please choose another device.\n", cur_func->device);
+ err ("The device %x is not supported for hot plugging. Please choose another device.\n",
+ cur_func->device);
return -ENODEV;
}
switch (hdr_type) {
@@ -179,8 +179,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
case PCI_HEADER_TYPE_MULTIBRIDGE:
class >>= 8;
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. "
- "Please insert another card.\n", cur_func->device);
+ err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n",
+ cur_func->device);
return -ENODEV;
}
assign_alt_irq (cur_func, class_code);
@@ -247,8 +247,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
class >>= 8;
debug ("class now is %x\n", class);
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. "
- "Please insert another card.\n", cur_func->device);
+ err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n",
+ cur_func->device);
return -ENODEV;
}
@@ -1073,7 +1073,7 @@ error:
* Input: bridge function
* Output: amount of resources needed
*****************************************************************************/
-static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
+static struct res_needed *scan_behind_bridge (struct pci_func *func, u8 busno)
{
int count, len[6];
u16 vendor_id;
@@ -1125,13 +1125,11 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno)
class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
if (class == PCI_CLASS_NOT_DEFINED_VGA) {
- err ("The device %x is VGA compatible and as is not supported for hot plugging. "
- "Please choose another device.\n", device);
+ err ("The device %x is VGA compatible and as is not supported for hot plugging. Please choose another device.\n", device);
amount->not_correct = 1;
return amount;
} else if (class == PCI_CLASS_DISPLAY_VGA) {
- err ("The device %x is not supported for hot plugging. "
- "Please choose another device.\n", device);
+ err ("The device %x is not supported for hot plugging. Please choose another device.\n", device);
amount->not_correct = 1;
return amount;
}
@@ -1483,12 +1481,10 @@ static int unconfigure_boot_card (struct slot *slot_cur)
debug ("hdr_type %x, class %x\n", hdr_type, class);
class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
if (class == PCI_CLASS_NOT_DEFINED_VGA) {
- err ("The device %x function %x is VGA compatible and is not supported for hot removing. "
- "Please choose another device.\n", device, function);
+ err ("The device %x function %x is VGA compatible and is not supported for hot removing. Please choose another device.\n", device, function);
return -ENODEV;
} else if (class == PCI_CLASS_DISPLAY_VGA) {
- err ("The device %x function %x is not supported for hot removing. "
- "Please choose another device.\n", device, function);
+ err ("The device %x function %x is not supported for hot removing. Please choose another device.\n", device, function);
return -ENODEV;
}
@@ -1513,9 +1509,7 @@ static int unconfigure_boot_card (struct slot *slot_cur)
case PCI_HEADER_TYPE_BRIDGE:
class >>= 8;
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This device %x function %x is not PCI-to-PCI bridge, "
- "and is not supported for hot-removing. "
- "Please try another card.\n", device, function);
+ err ("This device %x function %x is not PCI-to-PCI bridge, and is not supported for hot-removing. Please try another card.\n", device, function);
return -ENODEV;
}
rc = unconfigure_boot_bridge (busno, device, function);
@@ -1529,9 +1523,7 @@ static int unconfigure_boot_card (struct slot *slot_cur)
case PCI_HEADER_TYPE_MULTIBRIDGE:
class >>= 8;
if (class != PCI_CLASS_BRIDGE_PCI) {
- err ("This device %x function %x is not PCI-to-PCI bridge, "
- "and is not supported for hot-removing. "
- "Please try another card.\n", device, function);
+ err ("This device %x function %x is not PCI-to-PCI bridge, and is not supported for hot-removing. Please try another card.\n", device, function);
return -ENODEV;
}
rc = unconfigure_boot_bridge (busno, device, function);
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index a265acb2d518..f34745abd5b6 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -46,9 +46,9 @@ static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
static LIST_HEAD(gbuses);
-static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8 busno, int flag)
+static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc *curr, u8 busno, int flag)
{
- struct bus_node * newbus;
+ struct bus_node *newbus;
if (!(curr) && !(flag)) {
err ("NULL pointer passed\n");
@@ -69,7 +69,7 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8
return newbus;
}
-static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr)
+static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc *curr)
{
struct resource_node *rs;
@@ -93,7 +93,7 @@ static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * cur
static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus)
{
- struct bus_node * newbus;
+ struct bus_node *newbus;
struct range_node *newrange;
u8 num_ranges = 0;
@@ -789,8 +789,7 @@ int ibmphp_remove_resource (struct resource_node *res)
bus_cur = find_bus_wprev (res->busno, NULL, 0);
if (!bus_cur) {
- err ("cannot find corresponding bus of the io resource to remove "
- "bailing out...\n");
+ err ("cannot find corresponding bus of the io resource to remove bailing out...\n");
return -ENODEV;
}
@@ -934,9 +933,9 @@ int ibmphp_remove_resource (struct resource_node *res)
return 0;
}
-static struct range_node * find_range (struct bus_node *bus_cur, struct resource_node * res)
+static struct range_node *find_range (struct bus_node *bus_cur, struct resource_node *res)
{
- struct range_node * range = NULL;
+ struct range_node *range = NULL;
switch (res->type) {
case IO:
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index cfa92a984e62..56d8486dc167 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -59,14 +59,12 @@ static bool debug;
#define DRIVER_DESC "PCI Hot Plug PCI Core"
-//////////////////////////////////////////////////////////////////
-
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
/* Weee, fun with macros... */
-#define GET_STATUS(name,type) \
-static int get_##name (struct hotplug_slot *slot, type *value) \
+#define GET_STATUS(name, type) \
+static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
@@ -92,42 +90,41 @@ static ssize_t power_read_file(struct pci_slot *slot, char *buf)
retval = get_power_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf (buf, "%d\n", value);
-exit:
- return retval;
+ return retval;
+
+ return sprintf(buf, "%d\n", value);
}
static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
- size_t count)
+ size_t count)
{
struct hotplug_slot *slot = pci_slot->hotplug;
unsigned long lpower;
u8 power;
int retval = 0;
- lpower = simple_strtoul (buf, NULL, 10);
+ lpower = simple_strtoul(buf, NULL, 10);
power = (u8)(lpower & 0xff);
- dbg ("power = %d\n", power);
+ dbg("power = %d\n", power);
if (!try_module_get(slot->ops->owner)) {
retval = -ENODEV;
goto exit;
}
switch (power) {
- case 0:
- if (slot->ops->disable_slot)
- retval = slot->ops->disable_slot(slot);
- break;
-
- case 1:
- if (slot->ops->enable_slot)
- retval = slot->ops->enable_slot(slot);
- break;
-
- default:
- err ("Illegal value specified for power\n");
- retval = -EINVAL;
+ case 0:
+ if (slot->ops->disable_slot)
+ retval = slot->ops->disable_slot(slot);
+ break;
+
+ case 1:
+ if (slot->ops->enable_slot)
+ retval = slot->ops->enable_slot(slot);
+ break;
+
+ default:
+ err("Illegal value specified for power\n");
+ retval = -EINVAL;
}
module_put(slot->ops->owner);
@@ -150,24 +147,22 @@ static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
retval = get_attention_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf(buf, "%d\n", value);
+ return retval;
-exit:
- return retval;
+ return sprintf(buf, "%d\n", value);
}
static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
- size_t count)
+ size_t count)
{
struct hotplug_slot_ops *ops = slot->hotplug->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
- lattention = simple_strtoul (buf, NULL, 10);
+ lattention = simple_strtoul(buf, NULL, 10);
attention = (u8)(lattention & 0xff);
- dbg (" - attention = %d\n", attention);
+ dbg(" - attention = %d\n", attention);
if (!try_module_get(ops->owner)) {
retval = -ENODEV;
@@ -196,11 +191,9 @@ static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
retval = get_latch_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf (buf, "%d\n", value);
+ return retval;
-exit:
- return retval;
+ return sprintf(buf, "%d\n", value);
}
static struct pci_slot_attribute hotplug_slot_attr_latch = {
@@ -215,11 +208,9 @@ static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
retval = get_adapter_status(slot->hotplug, &value);
if (retval)
- goto exit;
- retval = sprintf (buf, "%d\n", value);
+ return retval;
-exit:
- return retval;
+ return sprintf(buf, "%d\n", value);
}
static struct pci_slot_attribute hotplug_slot_attr_presence = {
@@ -228,7 +219,7 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = {
};
static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
- size_t count)
+ size_t count)
{
struct hotplug_slot *slot = pci_slot->hotplug;
unsigned long ltest;
@@ -237,7 +228,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
ltest = simple_strtoul (buf, NULL, 10);
test = (u32)(ltest & 0xffffffff);
- dbg ("test = %d\n", test);
+ dbg("test = %d\n", test);
if (!try_module_get(slot->ops->owner)) {
retval = -ENODEV;
@@ -261,6 +252,7 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if ((slot->ops->enable_slot) ||
@@ -273,6 +265,7 @@ static bool has_power_file(struct pci_slot *pci_slot)
static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if ((slot->ops->set_attention_status) ||
@@ -284,6 +277,7 @@ static bool has_attention_file(struct pci_slot *pci_slot)
static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->get_latch_status)
@@ -294,6 +288,7 @@ static bool has_latch_file(struct pci_slot *pci_slot)
static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->get_adapter_status)
@@ -304,6 +299,7 @@ static bool has_adapter_file(struct pci_slot *pci_slot)
static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
+
if ((!slot) || (!slot->ops))
return false;
if (slot->ops->hardware_test)
@@ -397,13 +393,13 @@ static void fs_remove_slot(struct pci_slot *slot)
pci_hp_remove_module_link(slot);
}
-static struct hotplug_slot *get_slot_from_name (const char *name)
+static struct hotplug_slot *get_slot_from_name(const char *name)
{
struct hotplug_slot *slot;
struct list_head *tmp;
- list_for_each (tmp, &pci_hotplug_slot_list) {
- slot = list_entry (tmp, struct hotplug_slot, slot_list);
+ list_for_each(tmp, &pci_hotplug_slot_list) {
+ slot = list_entry(tmp, struct hotplug_slot, slot_list);
if (strcmp(hotplug_slot_name(slot), name) == 0)
return slot;
}
@@ -436,8 +432,7 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
if ((slot->info == NULL) || (slot->ops == NULL))
return -EINVAL;
if (slot->release == NULL) {
- dbg("Why are you trying to register a hotplug slot "
- "without a proper release function?\n");
+ dbg("Why are you trying to register a hotplug slot without a proper release function?\n");
return -EINVAL;
}
@@ -468,6 +463,7 @@ out:
mutex_unlock(&pci_hp_mutex);
return result;
}
+EXPORT_SYMBOL_GPL(__pci_hp_register);
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
@@ -506,6 +502,7 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_hp_deregister);
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core
@@ -527,24 +524,23 @@ int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
return 0;
}
+EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
-static int __init pci_hotplug_init (void)
+static int __init pci_hotplug_init(void)
{
int result;
result = cpci_hotplug_init(debug);
if (result) {
- err ("cpci_hotplug_init with error %d\n", result);
- goto err_cpci;
+ err("cpci_hotplug_init with error %d\n", result);
+ return result;
}
- info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
-err_cpci:
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
-static void __exit pci_hotplug_exit (void)
+static void __exit pci_hotplug_exit(void)
{
cpci_hotplug_exit();
}
@@ -557,7 +553,3 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-
-EXPORT_SYMBOL_GPL(__pci_hp_register);
-EXPORT_SYMBOL_GPL(pci_hp_deregister);
-EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
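Several *_read_file() hunks above replace a goto to a label that did nothing but return with a direct early return; when there is no cleanup to run, the jump only obscures the flow. A userspace sketch of the refactored shape (get_status() is a placeholder for the hotplug get_*_status() callbacks):

#include <stdio.h>

/* Placeholder for a hotplug get_*_status() callback. */
static int get_status(unsigned char *value)
{
	*value = 1;
	return 0;
}

static int status_read_file(char *buf)
{
	unsigned char value;
	int retval = get_status(&value);

	if (retval)
		return retval;	/* nothing to unwind, so fail fast */

	return sprintf(buf, "%d\n", value);
}

int main(void)
{
	char buf[16];

	if (status_read_file(buf) > 0)
		fputs(buf, stdout);
	return 0;
}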
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 20fea57d2149..93cc9266e8cb 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -103,10 +103,10 @@ static int __init dummy_probe(struct pcie_device *dev)
}
static struct pcie_port_service_driver __initdata dummy_driver = {
- .name = "pciehp_dummy",
+ .name = "pciehp_dummy",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_HP,
- .probe = dummy_probe,
+ .probe = dummy_probe,
};
static int __init select_detection_mode(void)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 0e0a2fff20a3..a2297db80813 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -266,8 +266,7 @@ static int pciehp_probe(struct pcie_device *dev)
rc = init_slot(ctrl);
if (rc) {
if (rc == -EBUSY)
- ctrl_warn(ctrl, "Slot already registered by another "
- "hotplug driver\n");
+ ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n");
else
ctrl_err(ctrl, "Slot initialization failed\n");
goto err_out_release_ctlr;
@@ -312,12 +311,12 @@ static void pciehp_remove(struct pcie_device *dev)
}
#ifdef CONFIG_PM
-static int pciehp_suspend (struct pcie_device *dev)
+static int pciehp_suspend(struct pcie_device *dev)
{
return 0;
}
-static int pciehp_resume (struct pcie_device *dev)
+static int pciehp_resume(struct pcie_device *dev)
{
struct controller *ctrl;
struct slot *slot;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index c75e6a678dcc..ff32e85e1de6 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -175,7 +175,7 @@ void pciehp_handle_linkstate_change(struct slot *p_slot)
hotplug controller logic
*/
-static void set_slot_off(struct controller *ctrl, struct slot * pslot)
+static void set_slot_off(struct controller *ctrl, struct slot *pslot)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
@@ -376,14 +376,12 @@ static void handle_button_press_event(struct slot *p_slot)
pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
- ctrl_info(ctrl,
- "PCI slot #%s - powering off due to button "
- "press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
- ctrl_info(ctrl,
- "PCI slot #%s - powering on due to button "
- "press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ slot_name(p_slot));
}
/* blink green LED and turn off amber */
pciehp_green_led_blink(p_slot);
@@ -404,8 +402,8 @@ static void handle_button_press_event(struct slot *p_slot)
else
pciehp_green_led_off(p_slot);
pciehp_set_attention_status(p_slot, 0);
- ctrl_info(ctrl, "PCI slot #%s - action canceled "
- "due to button press\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1463412cf7f8..42914e04d110 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -174,12 +174,10 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
* event even though it supports none of power
* controller, attention led, power led and EMI.
*/
- ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to "
- "wait for command completed event.\n");
+ ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n");
ctrl->no_cmd_complete = 0;
} else {
- ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe "
- "the controller is broken.\n");
+ ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n");
}
}
@@ -203,7 +201,7 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
!(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
poll = 1;
- pcie_wait_cmd(ctrl, poll);
+ pcie_wait_cmd(ctrl, poll);
}
mutex_unlock(&ctrl->ctrl_lock);
}
@@ -276,15 +274,15 @@ int pciehp_check_link_status(struct controller *ctrl)
bool found;
u16 lnk_status;
- /*
- * Data Link Layer Link Active Reporting must be capable for
- * hot-plug capable downstream port. But old controller might
- * not implement it. In this case, we wait for 1000 ms.
- */
- if (ctrl->link_active_reporting)
- pcie_wait_link_active(ctrl);
- else
- msleep(1000);
+ /*
+ * Data Link Layer Link Active Reporting must be capable for
+ * hot-plug capable downstream port. But old controller might
+ * not implement it. In this case, we wait for 1000 ms.
+ */
+ if (ctrl->link_active_reporting)
+ pcie_wait_link_active(ctrl);
+ else
+ msleep(1000);
/* wait 100ms before read pci conf, and try in 1s */
msleep(100);
@@ -295,7 +293,7 @@ int pciehp_check_link_status(struct controller *ctrl)
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
- ctrl_err(ctrl, "Link Training Error occurs \n");
+ ctrl_err(ctrl, "Link Training Error occurs\n");
return -1;
}
@@ -414,7 +412,7 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
return;
switch (value) {
- case 0 : /* turn off */
+ case 0: /* turn off */
slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
break;
case 1: /* turn on */
@@ -470,7 +468,7 @@ void pciehp_green_led_blink(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
-int pciehp_power_on_slot(struct slot * slot)
+int pciehp_power_on_slot(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
@@ -496,7 +494,7 @@ int pciehp_power_on_slot(struct slot * slot)
return retval;
}
-void pciehp_power_off_slot(struct slot * slot)
+void pciehp_power_off_slot(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
@@ -756,7 +754,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
}
-#define FLAG(x,y) (((x) & (y)) ? '+' : '-')
+#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
struct controller *pcie_init(struct pcie_device *dev)
{
@@ -783,14 +781,14 @@ struct controller *pcie_init(struct pcie_device *dev)
*/
if (NO_CMD_CMPL(ctrl) ||
!(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
- ctrl->no_cmd_complete = 1;
-
- /* Check if Data Link Layer Link Active Reporting is implemented */
- pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
- if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
- ctrl_dbg(ctrl, "Link Active Reporting supported\n");
- ctrl->link_active_reporting = 1;
- }
+ ctrl->no_cmd_complete = 1;
+
+ /* Check if Data Link Layer Link Active Reporting is implemented */
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
+ if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
+ ctrl_dbg(ctrl, "Link Active Reporting supported\n");
+ ctrl->link_active_reporting = 1;
+ }
/* Clear all remaining event bits in Slot Status register */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index b6cb1df67097..5f871f4c4af1 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -46,9 +46,8 @@ int pciehp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists "
- "at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
- pci_domain_nr(parent), parent->number);
+ ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n",
+ pci_name(dev), pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
ret = -EEXIST;
goto out;
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index ac69094e4b20..d062c008fc95 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -51,7 +51,7 @@ static LIST_HEAD(slot_list);
#define dbg(format, arg...) \
do { \
if (debug) \
- printk (KERN_DEBUG "%s: " format "\n", \
+ printk(KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
@@ -128,18 +128,18 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
switch (status) {
- case 0:
- /*
- * Fill in code here to turn light off
- */
- break;
-
- case 1:
- default:
- /*
- * Fill in code here to turn light on
- */
- break;
+ case 0:
+ /*
+ * Fill in code here to turn light off
+ */
+ break;
+
+ case 1:
+ default:
+ /*
+ * Fill in code here to turn light on
+ */
+ break;
}
return retval;
@@ -153,12 +153,12 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
switch (value) {
- case 0:
- /* Specify a test here */
- break;
- case 1:
- /* Specify another test here */
- break;
+ case 0:
+ /* Specify a test here */
+ break;
+ case 1:
+ /* Specify another test here */
+ break;
}
return retval;
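The re-indentation above applies the kernel rule for switch statements: case labels sit at the same depth as the switch keyword, and only the case bodies get an extra tab. A compact sketch:

static void set_light(int value)
{
	switch (value) {
	case 0:
		/* labels align with 'switch'; bodies indent one tab */
		break;
	case 1:
	default:
		break;
	}
}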
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 984d708552f6..93aa29f6d39c 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -39,6 +39,7 @@
bool rpaphp_debug;
LIST_HEAD(rpaphp_slot_head);
+EXPORT_SYMBOL_GPL(rpaphp_slot_head);
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "Linda Xie <lxie@us.ibm.com>"
@@ -88,7 +89,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
* @hotplug_slot: slot to get status
* @value: pointer to store status
*/
-static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval, level;
struct slot *slot = (struct slot *)hotplug_slot->private;
@@ -104,14 +105,14 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value)
* @hotplug_slot: slot to get status
* @value: pointer to store status
*/
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = (struct slot *)hotplug_slot->private;
*value = slot->hotplug_slot->info->attention_status;
return 0;
}
-static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value)
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = (struct slot *)hotplug_slot->private;
int rc, state;
@@ -241,6 +242,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(rpaphp_get_drc_props);
static int is_php_type(char *drc_type)
{
@@ -350,6 +352,7 @@ int rpaphp_add_slot(struct device_node *dn)
/* XXX FIXME: reports a failure only if last entry in loop failed */
return retval;
}
+EXPORT_SYMBOL_GPL(rpaphp_add_slot);
static void __exit cleanup_slots(void)
{
@@ -443,7 +446,3 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
module_init(rpaphp_init);
module_exit(rpaphp_exit);
-
-EXPORT_SYMBOL_GPL(rpaphp_add_slot);
-EXPORT_SYMBOL_GPL(rpaphp_slot_head);
-EXPORT_SYMBOL_GPL(rpaphp_get_drc_props);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 613043f7576f..bada20999870 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -188,7 +188,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
return 0;
}
-static struct hotplug_slot * sn_hp_destroy(void)
+static struct hotplug_slot *sn_hp_destroy(void)
{
struct slot *slot;
struct pci_slot *pci_slot;
@@ -250,15 +250,13 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
}
if (rc == PCI_L1_ERR) {
- dev_dbg(&slot->pci_bus->self->dev,
- "L1 failure %d with message: %s",
+ dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message: %s",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if (rc) {
- dev_dbg(&slot->pci_bus->self->dev,
- "insert failed with error %d sub-error %d\n",
+ dev_dbg(&slot->pci_bus->self->dev, "insert failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
@@ -288,21 +286,18 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
- dev_dbg(&slot->pci_bus->self->dev,
- "Cannot remove last 33MHz card\n");
+ dev_dbg(&slot->pci_bus->self->dev, "Cannot remove last 33MHz card\n");
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
- dev_dbg(&slot->pci_bus->self->dev,
- "L1 failure %d with message \n%s\n",
+ dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message \n%s\n",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
- dev_dbg(&slot->pci_bus->self->dev,
- "remove failed with error %d sub-error %d\n",
+ dev_dbg(&slot->pci_bus->self->dev, "remove failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
@@ -417,8 +412,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
if (acpi_bus_get_device(phandle, &pdevice)) {
- dev_dbg(&slot->pci_bus->self->dev,
- "no parent device, assuming NULL\n");
+ dev_dbg(&slot->pci_bus->self->dev, "no parent device, assuming NULL\n");
pdevice = NULL;
}
@@ -447,10 +441,8 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
ret = acpi_bus_scan(chandle);
if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_bus_scan "
- "failed (0x%x) for slot %d "
- "func %d\n", __func__,
- ret, (int)(adr>>16),
+ printk(KERN_ERR "%s: acpi_bus_scan failed (0x%x) for slot %d func %d\n",
+ __func__, ret, (int)(adr>>16),
(int)(adr&0xffff));
/* try to continue on */
}
@@ -471,11 +463,9 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
mutex_unlock(&sn_hotplug_mutex);
if (rc == 0)
- dev_dbg(&slot->pci_bus->self->dev,
- "insert operation successful\n");
+ dev_dbg(&slot->pci_bus->self->dev, "insert operation successful\n");
else
- dev_dbg(&slot->pci_bus->self->dev,
- "insert operation failed rc = %d\n", rc);
+ dev_dbg(&slot->pci_bus->self->dev, "insert operation failed rc = %d\n", rc);
return rc;
}
@@ -561,8 +551,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
acpi_status ret;
ret = acpi_unload_table_id(ssdt_id);
if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_unload_table_id "
- "failed (0x%x) for id %d\n",
+ printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n",
__func__, ret, ssdt_id);
/* try to continue on */
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 61529097464d..5897d516427b 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -180,7 +180,7 @@ int shpchp_configure_device(struct slot *p_slot);
int shpchp_unconfigure_device(struct slot *p_slot);
void cleanup_slots(struct controller *ctrl);
void shpchp_queue_pushbutton_work(struct work_struct *work);
-int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
+int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
@@ -295,7 +295,7 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set);
}
/* restore MiscII register */
- pci_read_config_dword( p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, &pcix_misc2_temp );
+ pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, &pcix_misc2_temp);
if (p_slot->ctrl->pcix_misc2_reg & SERRFATALENABLE_MASK)
pcix_misc2_temp |= SERRFATALENABLE_MASK;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index faf13abd5b99..294ef4b10cf1 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -143,8 +143,7 @@ static int init_slots(struct controller *ctrl)
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
- "hp_slot=%x sun=%x slot_device_offset=%x\n",
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x hp_slot=%x sun=%x slot_device_offset=%x\n",
pci_domain_nr(ctrl->pci_dev->subordinate),
slot->bus, slot->device, slot->hp_slot, slot->number,
ctrl->slot_device_offset);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6efc2ec5e4db..a81fb67ea9a1 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -162,7 +162,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
/*
* Power fault Cleared
*/
@@ -196,8 +196,8 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) {
- ctrl_err(ctrl, "%s: Issue of set bus speed mode command "
- "failed\n", __func__);
+ ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
+ __func__);
return WRONG_BUS_FREQUENCY;
}
return rc;
@@ -215,8 +215,8 @@ static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
*/
if (flag) {
if (asp < bsp) {
- ctrl_err(ctrl, "Speed of bus %x and adapter %x "
- "mismatch\n", bsp, asp);
+ ctrl_err(ctrl, "Speed of bus %x and adapter %x mismatch\n",
+ bsp, asp);
rc = WRONG_BUS_FREQUENCY;
}
return rc;
@@ -250,8 +250,7 @@ static int board_added(struct slot *p_slot)
hp_slot = p_slot->device - ctrl->slot_device_offset;
- ctrl_dbg(ctrl,
- "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n",
+ ctrl_dbg(ctrl, "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n",
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
/* Power on slot without connecting to bus */
@@ -263,8 +262,8 @@ static int board_added(struct slot *p_slot)
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
- ctrl_err(ctrl, "%s: Issue of set bus speed mode command"
- " failed\n", __func__);
+ ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
+ __func__);
return WRONG_BUS_FREQUENCY;
}
@@ -277,8 +276,7 @@ static int board_added(struct slot *p_slot)
rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
if (rc) {
- ctrl_err(ctrl, "Can't get adapter speed or "
- "bus mode mismatch\n");
+ ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
return WRONG_BUS_FREQUENCY;
}
@@ -289,8 +287,8 @@ static int board_added(struct slot *p_slot)
if (!list_empty(&ctrl->pci_dev->subordinate->devices))
slots_not_empty = 1;
- ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d,"
- " max_bus_speed %d\n", __func__, slots_not_empty, asp,
+ ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, max_bus_speed %d\n",
+ __func__, slots_not_empty, asp,
bsp, msp);
rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp);
@@ -490,12 +488,12 @@ static void handle_button_press_event(struct slot *p_slot)
p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering off due to "
- "button press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering on due to "
- "button press.\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ slot_name(p_slot));
}
/* blink green LED and turn off amber */
p_slot->hpc_ops->green_led_blink(p_slot);
@@ -518,8 +516,8 @@ static void handle_button_press_event(struct slot *p_slot)
else
p_slot->hpc_ops->green_led_off(p_slot);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- ctrl_info(ctrl, "PCI slot #%s - action canceled due to "
- "button press\n", slot_name(p_slot));
+ ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 2d7f474ca0ec..29e22352822c 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -341,8 +341,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
cmd_status = hpc_check_cmd_status(slot->ctrl);
if (cmd_status) {
- ctrl_err(ctrl,
- "Failed to issued command 0x%x (error code = %d)\n",
+ ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
retval = -EIO;
}
@@ -404,7 +403,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot * slot, u8 *status)
+static int hpc_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -528,7 +527,7 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
return retval;
}
-static int hpc_query_power_fault(struct slot * slot)
+static int hpc_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -614,7 +613,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
-static int hpc_power_on_slot(struct slot * slot)
+static int hpc_power_on_slot(struct slot *slot)
{
int retval;
@@ -625,7 +624,7 @@ static int hpc_power_on_slot(struct slot * slot)
return retval;
}
-static int hpc_slot_enable(struct slot * slot)
+static int hpc_slot_enable(struct slot *slot)
{
int retval;
@@ -638,7 +637,7 @@ static int hpc_slot_enable(struct slot * slot)
return retval;
}
-static int hpc_slot_disable(struct slot * slot)
+static int hpc_slot_disable(struct slot *slot)
{
int retval;
@@ -720,7 +719,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
}
-static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
+static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
@@ -974,8 +973,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
for (i = 0; i < 9 + num_slots; i++) {
rc = shpc_indirect_read(ctrl, i, &tempdword);
if (rc) {
- ctrl_err(ctrl,
- "Cannot read creg (index = %d)\n", i);
+ ctrl_err(ctrl, "Cannot read creg (index = %d)\n",
+ i);
goto abort;
}
ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword);
@@ -1060,10 +1059,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
/* Installs the interrupt handler */
rc = pci_enable_msi(pdev);
if (rc) {
- ctrl_info(ctrl,
- "Can't get msi for the hotplug controller\n");
- ctrl_info(ctrl,
- "Use INTx for the hotplug controller\n");
+ ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
+ ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
@@ -1071,8 +1068,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
ctrl_dbg(ctrl, "request_irq %d (returns %d)\n",
ctrl->pci_dev->irq, rc);
if (rc) {
- ctrl_err(ctrl, "Can't get irq %d for the hotplug "
- "controller\n", ctrl->pci_dev->irq);
+ ctrl_err(ctrl, "Can't get irq %d for the hotplug controller\n",
+ ctrl->pci_dev->irq);
goto abort_iounmap;
}
}
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 9202d133485c..469454e0cc48 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -46,9 +46,9 @@ int shpchp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists "
- "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
- pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ ctrl_err(ctrl, "Device %s already exists at %04x:%02x:%02x, cannot hot-add\n",
+ pci_name(dev), pci_domain_nr(parent),
+ p_slot->bus, p_slot->device);
pci_dev_put(dev);
ret = -EINVAL;
goto out;
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index e8c31fe20566..52875b360463 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -38,7 +38,7 @@
static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
- char * out = buf;
+ char *out = buf;
int index, busnr;
struct resource *res;
struct pci_bus *bus;
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index d68b030ab533..a94dd2c4183a 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -102,7 +102,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
spin_unlock_irqrestore(&ht_irq_lock, flags);
max_irq = (data >> 16) & 0xff;
- if ( idx > max_irq)
+ if (idx > max_irq)
return -EINVAL;
cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
@@ -131,6 +131,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
return irq;
}
+EXPORT_SYMBOL(__ht_create_irq);
/**
* ht_create_irq - create an irq and attach it to a device.
@@ -146,6 +147,7 @@ int ht_create_irq(struct pci_dev *dev, int idx)
{
return __ht_create_irq(dev, idx, NULL);
}
+EXPORT_SYMBOL(ht_create_irq);
/**
* ht_destroy_irq - destroy an irq created with ht_create_irq
@@ -165,7 +167,4 @@ void ht_destroy_irq(unsigned int irq)
kfree(cfg);
}
-
-EXPORT_SYMBOL(__ht_create_irq);
-EXPORT_SYMBOL(ht_create_irq);
EXPORT_SYMBOL(ht_destroy_irq);
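[ Editorial aside: the exports moved above sit next to a small three-call
  HyperTransport IRQ API. A minimal, hedged consumer sketch -- the demo_*
  names and the slot index are illustrative assumptions, not part of this
  patch: ]

static irqreturn_t demo_ht_isr(int irq, void *dev_id)
{
	/* acknowledge the interrupt on the device here */
	return IRQ_HANDLED;
}

static int demo_ht_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int irq, ret;

	irq = ht_create_irq(pdev, 0);		/* HT message slot 0 */
	if (irq < 0)
		return irq;			/* -EINVAL if slot > max_irq */

	ret = request_irq(irq, demo_ht_isr, 0, "demo-ht", pdev);
	if (ret)
		ht_destroy_irq(irq);		/* undo on failure */
	return ret;
}

[ Teardown runs in reverse: free_irq() first, then ht_destroy_irq(). ]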
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 27a7e67ddfe4..13f3d3037272 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -413,7 +413,7 @@ static void free_msi_irqs(struct pci_dev *dev)
if (dev->msi_irq_groups) {
sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
msi_attrs = dev->msi_irq_groups[0]->attrs;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ while (msi_attrs[count]) {
dev_attr = container_of(msi_attrs[count],
struct device_attribute, attr);
kfree(dev_attr->attr.name);
@@ -980,8 +980,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
/* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) {
- dev_info(&dev->dev, "can't enable MSI-X "
- "(MSI IRQ already assigned)\n");
+ dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
status = msix_capability_init(dev, entries, nvec);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 837d71f5390b..3f8e3dbcaa7c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -77,6 +77,7 @@ int pci_add_dynid(struct pci_driver *drv,
return retval;
}
+EXPORT_SYMBOL_GPL(pci_add_dynid);
static void pci_free_dynids(struct pci_driver *drv)
{
@@ -98,15 +99,15 @@ static void pci_free_dynids(struct pci_driver *drv)
*
* Allow PCI IDs to be added to an existing driver via sysfs.
*/
-static ssize_t
-store_new_id(struct device_driver *driver, const char *buf, size_t count)
+static ssize_t store_new_id(struct device_driver *driver, const char *buf,
+ size_t count)
{
struct pci_driver *pdrv = to_pci_driver(driver);
const struct pci_device_id *ids = pdrv->id_table;
- __u32 vendor, device, subvendor=PCI_ANY_ID,
- subdevice=PCI_ANY_ID, class=0, class_mask=0;
- unsigned long driver_data=0;
- int fields=0;
+ __u32 vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ unsigned long driver_data = 0;
+ int fields = 0;
int retval = 0;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
@@ -166,8 +167,8 @@ static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
*
* Removes a dynamic pci device ID to this driver.
*/
-static ssize_t
-store_remove_id(struct device_driver *driver, const char *buf, size_t count)
+static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
+ size_t count)
{
struct pci_dynid *dynid, *n;
struct pci_driver *pdrv = to_pci_driver(driver);
@@ -235,6 +236,7 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
}
return NULL;
}
+EXPORT_SYMBOL(pci_match_id);
static const struct pci_device_id pci_device_id_any = {
.vendor = PCI_ANY_ID,
@@ -372,8 +374,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* returns 0 on success, else error.
* side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
*/
-static int
-__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
+static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
const struct pci_device_id *id;
int error = 0;
@@ -390,7 +391,7 @@ __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
-static int pci_device_probe(struct device * dev)
+static int pci_device_probe(struct device *dev)
{
int error = 0;
struct pci_driver *drv;
@@ -406,10 +407,10 @@ static int pci_device_probe(struct device * dev)
return error;
}
-static int pci_device_remove(struct device * dev)
+static int pci_device_remove(struct device *dev)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
if (drv) {
if (drv->remove) {
@@ -537,8 +538,8 @@ static int pci_pm_reenable_device(struct pci_dev *pci_dev)
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
@@ -564,8 +565,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
if (drv && drv->suspend_late) {
pci_power_t prev = pci_dev->current_state;
@@ -595,8 +596,8 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
static int pci_legacy_resume_early(struct device *dev)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
return drv && drv->resume_early ?
drv->resume_early(pci_dev) : 0;
@@ -604,8 +605,8 @@ static int pci_legacy_resume_early(struct device *dev)
static int pci_legacy_resume(struct device *dev)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- struct pci_driver * drv = pci_dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
pci_fixup_device(pci_fixup_resume, pci_dev);
@@ -1255,6 +1256,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
/* register with core */
return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(__pci_register_driver);
/**
* pci_unregister_driver - unregister a pci driver
@@ -1266,12 +1268,12 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
* driverless.
*/
-void
-pci_unregister_driver(struct pci_driver *drv)
+void pci_unregister_driver(struct pci_driver *drv)
{
driver_unregister(&drv->driver);
pci_free_dynids(drv);
}
+EXPORT_SYMBOL(pci_unregister_driver);
static struct pci_driver pci_compat_driver = {
.name = "compat"
@@ -1284,19 +1286,19 @@ static struct pci_driver pci_compat_driver = {
* Returns the appropriate pci_driver structure or %NULL if there is no
* registered driver for the device.
*/
-struct pci_driver *
-pci_dev_driver(const struct pci_dev *dev)
+struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
if (dev->driver)
return dev->driver;
else {
int i;
- for(i=0; i<=PCI_ROM_RESOURCE; i++)
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
if (dev->resource[i].flags & IORESOURCE_BUSY)
return &pci_compat_driver;
}
return NULL;
}
+EXPORT_SYMBOL(pci_dev_driver);
/**
* pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
@@ -1342,6 +1344,7 @@ struct pci_dev *pci_dev_get(struct pci_dev *dev)
get_device(&dev->dev);
return dev;
}
+EXPORT_SYMBOL(pci_dev_get);
/**
* pci_dev_put - release a use of the pci device structure
@@ -1355,6 +1358,7 @@ void pci_dev_put(struct pci_dev *dev)
if (dev)
put_device(&dev->dev);
}
+EXPORT_SYMBOL(pci_dev_put);
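[ Hedged aside: pci_dev_get()/pci_dev_put(), exported above, implement the
  reference-count discipline that pci_get_device() iteration relies on.
  A sketch of the usual pattern -- demo_find_first_intel_nic() is an
  assumed example, not in the tree: ]

static struct pci_dev *demo_find_first_intel_nic(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev))) {
		/* each iteration drops the ref on the previous device */
		if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
			return pdev;	/* caller must pci_dev_put() this */
	}
	return NULL;			/* loop exit dropped the last ref */
}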
static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -1400,19 +1404,10 @@ struct bus_type pci_bus_type = {
.drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
};
+EXPORT_SYMBOL(pci_bus_type);
static int __init pci_driver_init(void)
{
return bus_register(&pci_bus_type);
}
-
postcore_initcall(pci_driver_init);
-
-EXPORT_SYMBOL_GPL(pci_add_dynid);
-EXPORT_SYMBOL(pci_match_id);
-EXPORT_SYMBOL(__pci_register_driver);
-EXPORT_SYMBOL(pci_unregister_driver);
-EXPORT_SYMBOL(pci_dev_driver);
-EXPORT_SYMBOL(pci_bus_type);
-EXPORT_SYMBOL(pci_dev_get);
-EXPORT_SYMBOL(pci_dev_put);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 45113daaa778..a3fbe2012ea3 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -40,9 +40,8 @@ enum smbios_attr_enum {
SMBIOS_ATTR_INSTANCE_SHOW,
};
-static size_t
-find_smbios_instance_string(struct pci_dev *pdev, char *buf,
- enum smbios_attr_enum attribute)
+static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
+ enum smbios_attr_enum attribute)
{
const struct dmi_device *dmi;
struct dmi_dev_onboard *donboard;
@@ -74,9 +73,8 @@ find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static umode_t
-smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
- int n)
+static umode_t smbios_instance_string_exist(struct kobject *kobj,
+ struct attribute *attr, int n)
{
struct device *dev;
struct pci_dev *pdev;
@@ -88,8 +86,8 @@ smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
S_IRUGO : 0;
}
-static ssize_t
-smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t smbioslabel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
@@ -98,9 +96,8 @@ smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf)
SMBIOS_ATTR_LABEL_SHOW);
}
-static ssize_t
-smbiosinstance_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t smbiosinstance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
@@ -130,26 +127,22 @@ static struct attribute_group smbios_attr_group = {
.is_visible = smbios_instance_string_exist,
};
-static int
-pci_create_smbiosname_file(struct pci_dev *pdev)
+static int pci_create_smbiosname_file(struct pci_dev *pdev)
{
return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
}
-static void
-pci_remove_smbiosname_file(struct pci_dev *pdev)
+static void pci_remove_smbiosname_file(struct pci_dev *pdev)
{
sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
}
#else
-static inline int
-pci_create_smbiosname_file(struct pci_dev *pdev)
+static inline int pci_create_smbiosname_file(struct pci_dev *pdev)
{
return -1;
}
-static inline void
-pci_remove_smbiosname_file(struct pci_dev *pdev)
+static inline void pci_remove_smbiosname_file(struct pci_dev *pdev)
{
}
#endif
@@ -175,8 +168,8 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
buf[len] = '\n';
}
-static int
-dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr)
+static int dsm_get_label(struct device *dev, char *buf,
+ enum acpi_attr_enum attr)
{
acpi_handle handle;
union acpi_object *obj, *tmp;
@@ -212,8 +205,7 @@ dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr)
return len;
}
-static bool
-device_has_dsm(struct device *dev)
+static bool device_has_dsm(struct device *dev)
{
acpi_handle handle;
@@ -225,8 +217,8 @@ device_has_dsm(struct device *dev)
1 << DEVICE_LABEL_DSM);
}
-static umode_t
-acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
+static umode_t acpi_index_string_exist(struct kobject *kobj,
+ struct attribute *attr, int n)
{
struct device *dev;
@@ -238,14 +230,14 @@ acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
return 0;
}
-static ssize_t
-acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t acpilabel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
}
-static ssize_t
-acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t acpiindex_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
}
@@ -271,33 +263,28 @@ static struct attribute_group acpi_attr_group = {
.is_visible = acpi_index_string_exist,
};
-static int
-pci_create_acpi_index_label_files(struct pci_dev *pdev)
+static int pci_create_acpi_index_label_files(struct pci_dev *pdev)
{
return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
}
-static int
-pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+static int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
{
sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
return 0;
}
#else
-static inline int
-pci_create_acpi_index_label_files(struct pci_dev *pdev)
+static inline int pci_create_acpi_index_label_files(struct pci_dev *pdev)
{
return -1;
}
-static inline int
-pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+static inline int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
{
return -1;
}
-static inline bool
-device_has_dsm(struct device *dev)
+static inline bool device_has_dsm(struct device *dev)
{
return false;
}
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 2ff77509d8e5..886fb3570278 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -55,7 +55,7 @@ static int __init pci_stub_init(void)
p = ids;
while ((id = strsep(&p, ","))) {
unsigned int vendor, device, subvendor = PCI_ANY_ID,
- subdevice = PCI_ANY_ID, class=0, class_mask=0;
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields;
if (!strlen(id))
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 84c350994b06..9ff0a901ecf7 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -41,8 +41,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pci_dev *pdev; \
\
- pdev = to_pci_dev (dev); \
- return sprintf (buf, format_string, pdev->field); \
+ pdev = to_pci_dev(dev); \
+ return sprintf(buf, format_string, pdev->field); \
} \
static DEVICE_ATTR_RO(field)
@@ -58,7 +58,7 @@ static ssize_t broken_parity_status_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf (buf, "%u\n", pdev->broken_parity_status);
+ return sprintf(buf, "%u\n", pdev->broken_parity_status);
}
static ssize_t broken_parity_status_store(struct device *dev,
@@ -77,10 +77,8 @@ static ssize_t broken_parity_status_store(struct device *dev,
}
static DEVICE_ATTR_RW(broken_parity_status);
-static ssize_t pci_dev_show_local_cpu(struct device *dev,
- int type,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pci_dev_show_local_cpu(struct device *dev, int type,
+ struct device_attribute *attr, char *buf)
{
const struct cpumask *mask;
int len;
@@ -101,14 +99,14 @@ static ssize_t pci_dev_show_local_cpu(struct device *dev,
}
static ssize_t local_cpus_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return pci_dev_show_local_cpu(dev, 1, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);
static ssize_t local_cpulist_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return pci_dev_show_local_cpu(dev, 0, attr, buf);
}
@@ -117,8 +115,7 @@ static DEVICE_ATTR_RO(local_cpulist);
/*
* PCI Bus Class Devices
*/
-static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
- int type,
+static ssize_t pci_bus_show_cpuaffinity(struct device *dev, int type,
struct device_attribute *attr,
char *buf)
{
@@ -149,11 +146,11 @@ static ssize_t cpulistaffinity_show(struct device *dev,
static DEVICE_ATTR_RO(cpulistaffinity);
/* show resources */
-static ssize_t
-resource_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct pci_dev * pci_dev = to_pci_dev(dev);
- char * str = buf;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ char *str = buf;
int i;
int max;
resource_size_t start, end;
@@ -166,7 +163,7 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
pci_resource_to_user(pci_dev, i, res, &start, &end);
- str += sprintf(str,"0x%016llx 0x%016llx 0x%016llx\n",
+ str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
(unsigned long long)start,
(unsigned long long)end,
(unsigned long long)res->flags);
@@ -175,7 +172,8 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
}
static DEVICE_ATTR_RO(resource);
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -187,9 +185,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
-static ssize_t enabled_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
@@ -213,57 +210,56 @@ static ssize_t enabled_store(struct device *dev,
return result < 0 ? result : count;
}
-static ssize_t enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev;
- pdev = to_pci_dev (dev);
- return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
+ pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enabled);
#ifdef CONFIG_NUMA
-static ssize_t
-numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- return sprintf (buf, "%d\n", dev->numa_node);
+ return sprintf(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RO(numa_node);
#endif
-static ssize_t
-dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dma_mask_bits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
+ return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);
-static ssize_t
-consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t consistent_dma_mask_bits_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
+ return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);
-static ssize_t
-msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
if (!pdev->subordinate)
return 0;
- return sprintf (buf, "%u\n",
- !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
+ return sprintf(buf, "%u\n",
+ !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
}
-static ssize_t
-msi_bus_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
@@ -290,8 +286,8 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
!!val) {
pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;
- dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI,"
- " bad things could happen\n", val ? "" : " not");
+ dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI, bad things could happen\n",
+ val ? "" : " not");
}
return count;
@@ -331,9 +327,9 @@ const struct attribute_group *pci_bus_groups[] = {
NULL,
};
-static ssize_t
-dev_rescan_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t dev_rescan_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
unsigned long val;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -352,9 +348,8 @@ static struct device_attribute dev_rescan_attr = __ATTR(rescan,
(S_IWUSR|S_IWGRP),
NULL, dev_rescan_store);
-static ssize_t
-remove_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long val;
@@ -369,9 +364,9 @@ static struct device_attribute dev_remove_attr = __ATTR(remove,
(S_IWUSR|S_IWGRP),
NULL, remove_store);
-static ssize_t
-dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t dev_bus_rescan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long val;
struct pci_bus *bus = to_pci_bus(dev);
@@ -412,7 +407,7 @@ static ssize_t d3cold_allowed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf (buf, "%u\n", pdev->d3cold_allowed);
+ return sprintf(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif
@@ -607,8 +602,8 @@ const struct attribute_group *pcibus_groups[] = {
NULL,
};
-static ssize_t
-boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_dev *vga_dev = vga_default_device();
@@ -622,22 +617,21 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
-static ssize_t
-pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
+ struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
+ kobj));
unsigned int size = 64;
loff_t init_off = off;
- u8 *data = (u8*) buf;
+ u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0) {
+ if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
size = dev->cfg_size;
- } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+ else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
- }
if (off > size)
return 0;
@@ -700,15 +694,15 @@ pci_read_config(struct file *filp, struct kobject *kobj,
return count;
}
-static ssize_t
-pci_write_config(struct file* filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
+ struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
+ kobj));
unsigned int size = count;
loff_t init_off = off;
- u8 *data = (u8*) buf;
+ u8 *data = (u8 *) buf;
if (off > dev->cfg_size)
return 0;
@@ -728,10 +722,10 @@ pci_write_config(struct file* filp, struct kobject *kobj,
if ((off & 3) && size > 2) {
u16 val = data[off - init_off];
val |= (u16) data[off - init_off + 1] << 8;
- pci_user_write_config_word(dev, off, val);
- off += 2;
- size -= 2;
- }
+ pci_user_write_config_word(dev, off, val);
+ off += 2;
+ size -= 2;
+ }
while (size > 3) {
u32 val = data[off - init_off];
@@ -762,10 +756,9 @@ pci_write_config(struct file* filp, struct kobject *kobj,
return count;
}
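[ Usage note, hedged: pci_read_config()/pci_write_config() back the
  per-device "config" binary attribute in sysfs. A userspace sketch --
  the device path is an assumption -- reading the 16-bit vendor ID at
  offset 0, which sits below the 64-byte limit applied to non-admin
  readers: ]

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint16_t vendor;
	int fd = open("/sys/bus/pci/devices/0000:00:00.0/config", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &vendor, sizeof(vendor), 0) == sizeof(vendor))
		printf("vendor: 0x%04x\n", vendor);
	close(fd);
	return 0;
}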
-static ssize_t
-read_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev =
to_pci_dev(container_of(kobj, struct device, kobj));
@@ -778,10 +771,9 @@ read_vpd_attr(struct file *filp, struct kobject *kobj,
return pci_read_vpd(dev, off, count, buf);
}
-static ssize_t
-write_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev =
to_pci_dev(container_of(kobj, struct device, kobj));
@@ -807,20 +799,18 @@ write_vpd_attr(struct file *filp, struct kobject *kobj,
* Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
* callback routine (pci_legacy_read).
*/
-static ssize_t
-pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- /* Only support 1, 2 or 4 byte accesses */
- if (count != 1 && count != 2 && count != 4)
- return -EINVAL;
+ /* Only support 1, 2 or 4 byte accesses */
+ if (count != 1 && count != 2 && count != 4)
+ return -EINVAL;
- return pci_legacy_read(bus, off, (u32 *)buf, count);
+ return pci_legacy_read(bus, off, (u32 *)buf, count);
}
/**
@@ -835,19 +825,18 @@ pci_read_legacy_io(struct file *filp, struct kobject *kobj,
* Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
* callback routine (pci_legacy_write).
*/
-static ssize_t
-pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- /* Only support 1, 2 or 4 byte accesses */
- if (count != 1 && count != 2 && count != 4)
- return -EINVAL;
- return pci_legacy_write(bus, off, *(u32 *)buf, count);
+ /* Only support 1, 2 or 4 byte accesses */
+ if (count != 1 && count != 2 && count != 4)
+ return -EINVAL;
+
+ return pci_legacy_write(bus, off, *(u32 *)buf, count);
}
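[ Hedged aside: the two helpers above back the "legacy_io" binary
  attribute, which exists only on architectures that provide the
  pci_legacy_read/pci_legacy_write callbacks; the bus path below is an
  assumption. Accesses must be exactly 1, 2 or 4 bytes, with the file
  offset selecting the port: ]

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t val;
	int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &val, 1, 0x61) == 1)	/* 1-byte read of port 0x61 */
		printf("port 0x61: 0x%02x\n", val);
	close(fd);
	return 0;
}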
/**
@@ -861,16 +850,14 @@ pci_write_legacy_io(struct file *filp, struct kobject *kobj,
* legacy memory space (first meg of bus space) into application virtual
* memory space.
*/
-static int
-pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
+ return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}
/**
@@ -884,16 +871,14 @@ pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
* legacy IO space (first meg of bus space) into application virtual
* memory space. Returns -ENOSYS if the operation isn't supported
*/
-static int
-pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
- struct pci_bus *bus = to_pci_bus(container_of(kobj,
- struct device,
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
kobj));
- return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
+ return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}
/**
@@ -903,10 +888,9 @@ pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
*
* Stub implementation. Can be overridden by arch if necessary.
*/
-void __weak
-pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
+void __weak pci_adjust_legacy_attr(struct pci_bus *b,
+ enum pci_mmap_state mmap_type)
{
- return;
}
/**
@@ -961,8 +945,7 @@ legacy_io_err:
kfree(b->legacy_io);
b->legacy_io = NULL;
kzalloc_err:
- printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
- "and ISA memory resources to sysfs\n");
+ printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
return;
}
@@ -1005,9 +988,8 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
*
* Use the regular PCI mapping routines to map a PCI resource into userspace.
*/
-static int
-pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
- struct vm_area_struct *vma, int write_combine)
+static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+ struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
@@ -1023,8 +1005,7 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
return -ENODEV;
if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
- WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
- "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
+ WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
pci_name(pdev), i,
(u64)pci_resource_start(pdev, i),
@@ -1046,26 +1027,23 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
}
-static int
-pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
-static int
-pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
}
-static ssize_t
-pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count, bool write)
+static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, bool write)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
@@ -1110,18 +1088,16 @@ pci_resource_io(struct file *filp, struct kobject *kobj,
return -EINVAL;
}
-static ssize_t
-pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
-static ssize_t
-pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}
@@ -1133,8 +1109,7 @@ pci_write_resource_io(struct file *filp, struct kobject *kobj,
* If we created resource files for @pdev, remove them from sysfs and
* free their resources.
*/
-static void
-pci_remove_resource_files(struct pci_dev *pdev)
+static void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
@@ -1237,10 +1212,9 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
*
* writing anything except 0 enables it
*/
-static ssize_t
-pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -1264,10 +1238,9 @@ pci_write_rom(struct file *filp, struct kobject *kobj,
* Put @count bytes starting at @off into @buf from the ROM in the PCI
* device corresponding to @kobj.
*/
-static ssize_t
-pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
void __iomem *rom;
@@ -1313,9 +1286,8 @@ static struct bin_attribute pcie_config_attr = {
.write = pci_write_config,
};
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
@@ -1382,7 +1354,7 @@ error:
return retval;
}
-int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
+int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
int retval;
int rom_size = 0;
@@ -1520,7 +1492,6 @@ static int __init pci_sysfs_init(void)
return 0;
}
-
late_initcall(pci_sysfs_init);
static struct attribute *pci_dev_dev_attrs[] = {
@@ -1529,7 +1500,7 @@ static struct attribute *pci_dev_dev_attrs[] = {
};
static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
- struct attribute *a, int n)
+ struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1548,7 +1519,7 @@ static struct attribute *pci_dev_hp_attrs[] = {
};
static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
- struct attribute *a, int n)
+ struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1572,7 +1543,7 @@ static struct attribute *sriov_dev_attrs[] = {
};
static umode_t sriov_attrs_are_visible(struct kobject *kobj,
- struct attribute *a, int n)
+ struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 436a76ab4bb1..1c8592b0e146 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -114,7 +114,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus)
max = bus->busn_res.end;
list_for_each_entry(tmp, &bus->children, node) {
n = pci_bus_max_busnr(tmp);
- if(n > max)
+ if (n > max)
max = n;
}
return max;
@@ -226,6 +226,7 @@ int pci_find_capability(struct pci_dev *dev, int cap)
return pos;
}
+EXPORT_SYMBOL(pci_find_capability);
/**
* pci_bus_find_capability - query for devices' capabilities
@@ -253,6 +254,7 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
return pos;
}
+EXPORT_SYMBOL(pci_bus_find_capability);
/**
* pci_find_next_ext_capability - Find an extended capability
@@ -403,8 +405,8 @@ EXPORT_SYMBOL_GPL(pci_find_ht_capability);
* For given resource region of given device, return the resource
* region of parent bus the given region is contained in.
*/
-struct resource *
-pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
+struct resource *pci_find_parent_resource(const struct pci_dev *dev,
+ struct resource *res)
{
const struct pci_bus *bus = dev->bus;
struct resource *r;
@@ -436,6 +438,7 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
}
return NULL;
}
+EXPORT_SYMBOL(pci_find_parent_resource);
/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
@@ -470,8 +473,7 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
* Restore the BAR values for a given device, so as to make it
* accessible by its driver.
*/
-static void
-pci_restore_bars(struct pci_dev *dev)
+static void pci_restore_bars(struct pci_dev *dev)
{
int i;
@@ -496,7 +498,7 @@ static inline bool platform_pci_power_manageable(struct pci_dev *dev)
}
static inline int platform_pci_set_power_state(struct pci_dev *dev,
- pci_power_t t)
+ pci_power_t t)
{
return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}
@@ -553,8 +555,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
- dev_err(&dev->dev, "invalid power transition "
- "(from state %d to %d)\n", dev->current_state, state);
+ dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
+ dev->current_state, state);
return -EINVAL;
}
@@ -601,8 +603,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state && printk_ratelimit())
- dev_info(&dev->dev, "Refused to change power state, "
- "currently in D%d\n", dev->current_state);
+ dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
+ dev->current_state);
/*
* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -846,6 +848,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return error;
}
+EXPORT_SYMBOL(pci_set_power_state);
/**
* pci_choose_state - Choose the power state of a PCI device
@@ -884,12 +887,10 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
}
return PCI_D0;
}
-
EXPORT_SYMBOL(pci_choose_state);
#define PCI_EXP_SAVE_REGS 7
-
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
u16 cap, bool extended)
{
@@ -1001,8 +1002,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
* pci_save_state - save the PCI configuration space of a device before suspending
* @dev: - PCI device that we're dealing with
*/
-int
-pci_save_state(struct pci_dev *dev)
+int pci_save_state(struct pci_dev *dev)
{
int i;
/* XXX: 100% dword access ok here? */
@@ -1017,6 +1017,7 @@ pci_save_state(struct pci_dev *dev)
return i;
return 0;
}
+EXPORT_SYMBOL(pci_save_state);
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
u32 saved_val, int retry)
@@ -1028,8 +1029,8 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
return;
for (;;) {
- dev_dbg(&pdev->dev, "restoring config space at offset "
- "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
+ dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
+ offset, val, saved_val);
pci_write_config_dword(pdev, offset, saved_val);
if (retry-- <= 0)
return;
@@ -1087,6 +1088,7 @@ void pci_restore_state(struct pci_dev *dev)
dev->state_saved = false;
}
+EXPORT_SYMBOL(pci_restore_state);
struct pci_saved_state {
u32 config_space[16];
@@ -1231,6 +1233,7 @@ int pci_reenable_device(struct pci_dev *dev)
return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
return 0;
}
+EXPORT_SYMBOL(pci_reenable_device);
static void pci_enable_bridge(struct pci_dev *dev)
{
@@ -1305,6 +1308,7 @@ int pci_enable_device_io(struct pci_dev *dev)
{
return pci_enable_device_flags(dev, IORESOURCE_IO);
}
+EXPORT_SYMBOL(pci_enable_device_io);
/**
* pci_enable_device_mem - Initialize a device for use with Memory space
@@ -1318,6 +1322,7 @@ int pci_enable_device_mem(struct pci_dev *dev)
{
return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
+EXPORT_SYMBOL(pci_enable_device_mem);
/**
* pci_enable_device - Initialize device before it's used by a driver.
@@ -1334,6 +1339,7 @@ int pci_enable_device(struct pci_dev *dev)
{
return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
+EXPORT_SYMBOL(pci_enable_device);
/*
* Managed PCI resources. This manages device on/off, intx/msi/msix
@@ -1416,6 +1422,7 @@ int pcim_enable_device(struct pci_dev *pdev)
}
return rc;
}
+EXPORT_SYMBOL(pcim_enable_device);
/**
* pcim_pin_device - Pin managed PCI device
@@ -1434,6 +1441,7 @@ void pcim_pin_device(struct pci_dev *pdev)
if (dr)
dr->pinned = 1;
}
+EXPORT_SYMBOL(pcim_pin_device);
/*
* pcibios_add_device - provide arch specific hooks when adding device dev
@@ -1443,7 +1451,7 @@ void pcim_pin_device(struct pci_dev *pdev)
* devices are added. This is the default implementation. Architecture
* implementations can override this.
*/
-int __weak pcibios_add_device (struct pci_dev *dev)
+int __weak pcibios_add_device(struct pci_dev *dev)
{
return 0;
}
@@ -1515,8 +1523,7 @@ void pci_disable_enabled_device(struct pci_dev *dev)
* Note we don't actually disable the device until all callers of
* pci_enable_device() have called pci_disable_device().
*/
-void
-pci_disable_device(struct pci_dev *dev)
+void pci_disable_device(struct pci_dev *dev)
{
struct pci_devres *dr;
@@ -1534,6 +1541,7 @@ pci_disable_device(struct pci_dev *dev)
dev->is_busmaster = 0;
}
+EXPORT_SYMBOL(pci_disable_device);
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
@@ -1562,6 +1570,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
return pcibios_set_pcie_reset_state(dev, state);
}
+EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
/**
* pci_check_pme_status - Check if given device has generated PME.
@@ -1641,6 +1650,7 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
return !!(dev->pme_support & (1 << state));
}
+EXPORT_SYMBOL(pci_pme_capable);
static void pci_pme_list_scan(struct work_struct *work)
{
@@ -1745,6 +1755,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
+EXPORT_SYMBOL(pci_pme_active);
/**
* __pci_enable_wake - enable PCI device as wakeup event source
@@ -1830,6 +1841,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
pci_enable_wake(dev, PCI_D3cold, enable) :
pci_enable_wake(dev, PCI_D3hot, enable);
}
+EXPORT_SYMBOL(pci_wake_from_d3);
/**
* pci_target_state - find an appropriate low power state for a given PCI dev
@@ -1908,6 +1920,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
return error;
}
+EXPORT_SYMBOL(pci_prepare_to_sleep);
/**
* pci_back_from_sleep - turn PCI device on during system-wide transition into working state
@@ -1920,6 +1933,7 @@ int pci_back_from_sleep(struct pci_dev *dev)
pci_enable_wake(dev, PCI_D0, false);
return pci_set_power_state(dev, PCI_D0);
}
+EXPORT_SYMBOL(pci_back_from_sleep);
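[ Hedged aside: pci_prepare_to_sleep()/pci_back_from_sleep(), exported
  above, pair naturally in a legacy suspend/resume implementation. A
  sketch; the demo_* names are assumptions, not from this patch: ]

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* pick + enter low-power state */
}

static int demo_resume(struct pci_dev *pdev)
{
	int err = pci_back_from_sleep(pdev);	/* back to D0, wakeup off */

	pci_restore_state(pdev);
	return err;
}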
/**
* pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
@@ -2415,8 +2429,7 @@ u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
return (((pin - 1) + slot) % 4) + 1;
}
-int
-pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
u8 pin;
@@ -2478,6 +2491,7 @@ void pci_release_region(struct pci_dev *pdev, int bar)
if (dr)
dr->region_mask &= ~(1 << bar);
}
+EXPORT_SYMBOL(pci_release_region);
/**
* __pci_request_region - Reserved PCI I/O and memory resource
@@ -2498,8 +2512,8 @@ void pci_release_region(struct pci_dev *pdev, int bar)
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
-static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
- int exclusive)
+static int __pci_request_region(struct pci_dev *pdev, int bar,
+ const char *res_name, int exclusive)
{
struct pci_devres *dr;
@@ -2510,8 +2524,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
if (!request_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name))
goto err_out;
- }
- else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+ } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar), res_name,
exclusive))
@@ -2548,6 +2561,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, 0);
}
+EXPORT_SYMBOL(pci_request_region);
/**
* pci_request_region_exclusive - Reserved PCI I/O and memory resource
@@ -2567,10 +2581,13 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
* explicitly not allowed to map the resource via /dev/mem or
* sysfs.
*/
-int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
+ const char *res_name)
{
return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
+EXPORT_SYMBOL(pci_request_region_exclusive);
+
/**
* pci_release_selected_regions - Release selected PCI I/O and memory resources
* @pdev: PCI device whose resources were previously reserved
@@ -2587,9 +2604,10 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
if (bars & (1 << i))
pci_release_region(pdev, i);
}
+EXPORT_SYMBOL(pci_release_selected_regions);
static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+ const char *res_name, int excl)
{
int i;
@@ -2600,7 +2618,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
return 0;
err_out:
- while(--i >= 0)
+ while (--i >= 0)
if (bars & (1 << i))
pci_release_region(pdev, i);
@@ -2619,13 +2637,15 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars,
{
return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
+EXPORT_SYMBOL(pci_request_selected_regions);
-int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
- int bars, const char *res_name)
+int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
+ const char *res_name)
{
return __pci_request_selected_regions(pdev, bars, res_name,
IORESOURCE_EXCLUSIVE);
}
+EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
/**
* pci_release_regions - Release reserved PCI I/O and memory resources
@@ -2640,6 +2660,7 @@ void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << 6) - 1);
}
+EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserved PCI I/O and memory resources
@@ -2658,6 +2679,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
+EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserved PCI I/O and memory resources
@@ -2680,6 +2702,7 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
return pci_request_selected_regions_exclusive(pdev,
((1 << 6) - 1), res_name);
}
+EXPORT_SYMBOL(pci_request_regions_exclusive);
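[ Hedged aside: the region helpers exported above form the canonical
  probe-path sequence together with pci_enable_device() and
  pci_set_master(), both also exported in this patch. A minimal sketch;
  the demo driver name is an assumption: ]

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "demo");	/* reserve all six BARs */
	if (err)
		goto out_disable;

	pci_set_master(pdev);
	return 0;

out_disable:
	pci_disable_device(pdev);
	return err;
}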
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
@@ -2749,6 +2772,7 @@ void pci_set_master(struct pci_dev *dev)
__pci_set_master(dev, true);
pcibios_set_master(dev);
}
+EXPORT_SYMBOL(pci_set_master);
/**
* pci_clear_master - disables bus-mastering for device dev
@@ -2758,6 +2782,7 @@ void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
+EXPORT_SYMBOL(pci_clear_master);
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
@@ -2790,30 +2815,13 @@ int pci_set_cacheline_size(struct pci_dev *dev)
if (cacheline_size == pci_cache_line_size)
return 0;
- dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
- "supported\n", pci_cache_line_size << 2);
+ dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
+ pci_cache_line_size << 2);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
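/*
 * Illustrative note, not part of this patch: the PCI_CACHE_LINE_SIZE
 * register counts 32-bit dwords, which is why the message above shifts
 * pci_cache_line_size left by two to report the size in bytes.
 */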
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
@@ -2822,9 +2830,11 @@ void pci_clear_mwi(struct pci_dev *dev)
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-int
-pci_set_mwi(struct pci_dev *dev)
+int pci_set_mwi(struct pci_dev *dev)
{
+#ifdef PCI_DISABLE_MWI
+ return 0;
+#else
int rc;
u16 cmd;
@@ -2833,14 +2843,15 @@ pci_set_mwi(struct pci_dev *dev)
return rc;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (! (cmd & PCI_COMMAND_INVALIDATE)) {
+ if (!(cmd & PCI_COMMAND_INVALIDATE)) {
dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
-
return 0;
+#endif
}
+EXPORT_SYMBOL(pci_set_mwi);
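/*
 * Illustrative sketch, not part of this patch: with the PCI_DISABLE_MWI
 * stubs folded into the function bodies, callers look the same on every
 * arch.  The helper name below is hypothetical.
 */
static void demo_enable_mwi(struct pci_dev *pdev)
{
	/* Best-effort: MWI is an optimization, so a failure is harmless */
	if (pci_try_set_mwi(pdev))
		dev_dbg(&pdev->dev, "running without Mem-Wr-Inval\n");
}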
/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2853,9 +2864,13 @@ pci_set_mwi(struct pci_dev *dev)
*/
int pci_try_set_mwi(struct pci_dev *dev)
{
- int rc = pci_set_mwi(dev);
- return rc;
+#ifdef PCI_DISABLE_MWI
+ return 0;
+#else
+ return pci_set_mwi(dev);
+#endif
}
+EXPORT_SYMBOL(pci_try_set_mwi);
/**
* pci_clear_mwi - disables Memory-Write-Invalidate for device dev
@@ -2863,9 +2878,9 @@ int pci_try_set_mwi(struct pci_dev *dev)
*
* Disables PCI Memory-Write-Invalidate transaction on the device
*/
-void
-pci_clear_mwi(struct pci_dev *dev)
+void pci_clear_mwi(struct pci_dev *dev)
{
+#ifndef PCI_DISABLE_MWI
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
@@ -2873,8 +2888,9 @@ pci_clear_mwi(struct pci_dev *dev)
cmd &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+#endif
}
-#endif /* ! PCI_DISABLE_MWI */
+EXPORT_SYMBOL(pci_clear_mwi);
/**
* pci_intx - enables/disables PCI INTx for device dev
@@ -2883,18 +2899,16 @@ pci_clear_mwi(struct pci_dev *dev)
*
* Enables/disables PCI INTx for device dev
*/
-void
-pci_intx(struct pci_dev *pdev, int enable)
+void pci_intx(struct pci_dev *pdev, int enable)
{
u16 pci_command, new;
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
- if (enable) {
+ if (enable)
new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
- } else {
+ else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- }
if (new != pci_command) {
struct pci_devres *dr;
@@ -2908,6 +2922,7 @@ pci_intx(struct pci_dev *pdev, int enable)
}
}
}
+EXPORT_SYMBOL_GPL(pci_intx);
/**
* pci_intx_mask_supported - probe for INTx masking support
@@ -2937,8 +2952,8 @@ bool pci_intx_mask_supported(struct pci_dev *dev)
* go ahead and check it.
*/
if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
- dev_err(&dev->dev, "Command register changed from "
- "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
+ orig, new);
} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
mask_supported = true;
pci_write_config_word(dev, PCI_COMMAND, orig);
@@ -3120,12 +3135,16 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
if (probe)
return 0;
- /* Wait for Transaction Pending bit clean */
- if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
+ /*
+ * Wait for Transaction Pending bit to clear. A word-aligned test
+ * is used, so we use the control offset rather than status and shift
+ * the test bit to match.
+ */
+ if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
+ PCI_AF_STATUS_TP << 8))
goto clear;
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
clear:
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
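/*
 * Illustrative note, not part of this patch: PCI_AF_STATUS is the byte at
 * PCI_AF_CTRL + 1, and pci_wait_for_pending() performs 16-bit reads, so a
 * read at the word-aligned control offset returns the status byte in bits
 * 15:8.  Shifting PCI_AF_STATUS_TP left by 8 lines the mask up with it:
 *
 *	u16 word;
 *	pci_read_config_word(dev, pos + PCI_AF_CTRL, &word);
 *	pending = word & (PCI_AF_STATUS_TP << 8);
 */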
@@ -3179,14 +3198,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return 0;
}
-/**
- * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
- * @dev: Bridge device
- *
- * Use the bridge control register to assert reset on the secondary bus.
- * Devices on the secondary bus are left in power-on state.
- */
-void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
@@ -3211,6 +3223,18 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
*/
ssleep(1);
}
+
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+{
+ pcibios_reset_secondary_bus(dev);
+}
EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
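/*
 * Illustrative sketch, not part of this patch: with the default behind a
 * __weak symbol, an architecture can supply its own reset while callers of
 * pci_reset_bridge_secondary_bus() stay unchanged.  The shorter settle
 * delay below is an assumption about such a platform, not kernel policy.
 */
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	u16 ctrl;

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	msleep(2);	/* PCI spec: reset must be asserted for at least 1ms */

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);	/* assumed: devices on this platform settle quickly */
}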
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
@@ -4095,6 +4119,7 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
bars |= (1 << i);
return bars;
}
+EXPORT_SYMBOL(pci_select_bars);
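/*
 * Illustrative sketch, not part of this patch: pci_select_bars() pairs
 * naturally with the selected-regions helpers exported earlier in this
 * diff.  The function and region name are hypothetical.
 */
static int demo_claim_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Request only the memory BARs, leaving any I/O BARs untouched */
	return pci_request_selected_regions(pdev, bars, "demo");
}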
/**
* pci_resource_bar - get position of the BAR associated with a resource
@@ -4134,7 +4159,7 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
}
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
- unsigned int command_bits, u32 flags)
+ unsigned int command_bits, u32 flags)
{
if (arch_set_vga_state)
return arch_set_vga_state(dev, decode, command_bits,
@@ -4246,11 +4271,10 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
bus == dev->bus->number &&
slot == PCI_SLOT(dev->devfn) &&
func == PCI_FUNC(dev->devfn)) {
- if (align_order == -1) {
+ if (align_order == -1)
align = PAGE_SIZE;
- } else {
+ else
align = 1 << align_order;
- }
/* Found */
break;
}
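/*
 * Illustrative note, not part of this patch: this branch implements the
 * "pci=resource_alignment=" boot parameter.  With a hypothetical device at
 * 04:00.0, "pci=resource_alignment=04:00.0" requests PAGE_SIZE alignment,
 * while "pci=resource_alignment=12@04:00.0" requests 1 << 12 bytes.
 */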
@@ -4368,7 +4392,6 @@ static int __init pci_resource_alignment_sysfs_init(void)
return bus_create_file(&pci_bus_type,
&bus_attr_resource_alignment);
}
-
late_initcall(pci_resource_alignment_sysfs_init);
static void pci_no_domains(void)
@@ -4447,41 +4470,3 @@ static int __init pci_setup(char *str)
return 0;
}
early_param("pci", pci_setup);
-
-EXPORT_SYMBOL(pci_reenable_device);
-EXPORT_SYMBOL(pci_enable_device_io);
-EXPORT_SYMBOL(pci_enable_device_mem);
-EXPORT_SYMBOL(pci_enable_device);
-EXPORT_SYMBOL(pcim_enable_device);
-EXPORT_SYMBOL(pcim_pin_device);
-EXPORT_SYMBOL(pci_disable_device);
-EXPORT_SYMBOL(pci_find_capability);
-EXPORT_SYMBOL(pci_bus_find_capability);
-EXPORT_SYMBOL(pci_release_regions);
-EXPORT_SYMBOL(pci_request_regions);
-EXPORT_SYMBOL(pci_request_regions_exclusive);
-EXPORT_SYMBOL(pci_release_region);
-EXPORT_SYMBOL(pci_request_region);
-EXPORT_SYMBOL(pci_request_region_exclusive);
-EXPORT_SYMBOL(pci_release_selected_regions);
-EXPORT_SYMBOL(pci_request_selected_regions);
-EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
-EXPORT_SYMBOL(pci_set_master);
-EXPORT_SYMBOL(pci_clear_master);
-EXPORT_SYMBOL(pci_set_mwi);
-EXPORT_SYMBOL(pci_try_set_mwi);
-EXPORT_SYMBOL(pci_clear_mwi);
-EXPORT_SYMBOL_GPL(pci_intx);
-EXPORT_SYMBOL(pci_assign_resource);
-EXPORT_SYMBOL(pci_find_parent_resource);
-EXPORT_SYMBOL(pci_select_bars);
-
-EXPORT_SYMBOL(pci_set_power_state);
-EXPORT_SYMBOL(pci_save_state);
-EXPORT_SYMBOL(pci_restore_state);
-EXPORT_SYMBOL(pci_pme_capable);
-EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_wake_from_d3);
-EXPORT_SYMBOL(pci_prepare_to_sleep);
-EXPORT_SYMBOL(pci_back_from_sleep);
-EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 587e7e853107..182224acedbe 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -397,16 +397,14 @@ static int aer_inject(struct aer_error_inj *einj)
if (!aer_mask_override && einj->cor_status &&
!(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
- printk(KERN_WARNING "The correctable error(s) is masked "
- "by device\n");
+ printk(KERN_WARNING "The correctable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
if (!aer_mask_override && einj->uncor_status &&
!(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
- printk(KERN_WARNING "The uncorrectable error(s) is masked "
- "by device\n");
+ printk(KERN_WARNING "The uncorrectable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
@@ -464,8 +462,7 @@ static int aer_inject(struct aer_error_inj *einj)
goto out_put;
}
aer_irq(-1, edev);
- }
- else
+ } else
ret = -EINVAL;
out_put:
kfree(err_alloc);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index b2c8881da764..5653ea94547f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -542,8 +542,7 @@ static void aer_recover_work_func(struct work_struct *work);
#define AER_RECOVER_RING_ORDER 4
#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
-struct aer_recover_entry
-{
+struct aer_recover_entry {
u8 bus;
u8 devfn;
u16 domain;
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 34ff7026440c..36ed31b52198 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -172,9 +172,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
int id = ((dev->bus->number << 8) | dev->devfn);
if (!info->status) {
- dev_err(&dev->dev,
- "PCIe Bus Error: severity=%s, type=Unaccessible, "
- "id=%04x(Unregistered Agent ID)\n",
+ dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
aer_error_severity_string[info->severity], id);
goto out;
}
@@ -182,13 +180,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- dev_err(&dev->dev,
- "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], id, aer_agent_string[agent]);
- dev_err(&dev->dev,
- " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev_err(&dev->dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device,
info->status, info->mask);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index bbc3bdd2b189..82e06a86cd77 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -199,8 +199,7 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
- dev_dbg(&port->dev, "PME interrupt generated for "
- "non-existent device %02x:%02x.%d\n",
+ dev_dbg(&port->dev, "PME interrupt generated for non-existent device %02x:%02x.%d\n",
busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 0d8fdc48e642..80887eaa0668 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -204,8 +204,8 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
return -ENODEV;
if (!dev->irq && dev->pin) {
- dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
- "check vendor BIOS\n", dev->vendor, dev->device);
+ dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n",
+ dev->vendor, dev->device);
}
status = pcie_port_device_register(dev);
if (status)
@@ -397,7 +397,7 @@ static struct pci_driver pcie_portdriver = {
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
{
pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
- d->ident);
+ d->ident);
pcie_pme_disable_msi();
return 0;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2bbf5221afb3..e3cf8a2e6292 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -168,7 +168,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos)
{
u32 l, sz, mask;
u64 l64, sz64, mask64;
@@ -433,8 +433,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
limit |= ((unsigned long) mem_limit_hi) << 32;
#else
if (mem_base_hi || mem_limit_hi) {
- dev_err(&dev->dev, "can't handle 64-bit "
- "address space for bridge\n");
+ dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
return;
}
#endif
@@ -604,7 +603,6 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat)
return agp_speeds[index];
}
-
static void pci_set_bus_speed(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
@@ -636,11 +634,10 @@ static void pci_set_bus_speed(struct pci_bus *bus)
} else if (status & PCI_X_SSTATUS_266MHZ) {
max = PCI_SPEED_133MHz_PCIX_266;
} else if (status & PCI_X_SSTATUS_133MHZ) {
- if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
+ if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
max = PCI_SPEED_133MHz_PCIX_ECC;
- } else {
+ else
max = PCI_SPEED_133MHz_PCIX;
- }
} else {
max = PCI_SPEED_66MHz_PCIX;
}
@@ -664,7 +661,6 @@ static void pci_set_bus_speed(struct pci_bus *bus)
}
}
-
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -729,7 +725,8 @@ add_dev:
return child;
}
-struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
+ int busnr)
{
struct pci_bus *child;
@@ -741,6 +738,7 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int
}
return child;
}
+EXPORT_SYMBOL(pci_add_new_bus);
/*
* If it's a bridge, configure it and scan the bus behind it.
@@ -887,7 +885,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
* as cards with a PCI-to-PCI bridge can be
* inserted later.
*/
- for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
+ for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
struct pci_bus *parent = bus;
if (pci_find_bus(pci_domain_nr(bus),
max+i+1))
@@ -934,8 +932,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
(child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
(child->busn_res.end < bus->number)) {
- dev_info(&child->dev, "%pR %s "
- "hidden behind%s bridge %s %pR\n",
+ dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
&child->busn_res,
(bus->number > child->busn_res.end &&
bus->busn_res.end < child->number) ?
@@ -952,6 +949,7 @@ out:
return max;
}
+EXPORT_SYMBOL(pci_scan_bridge);
/*
* Read interrupt line and base address registers.
@@ -992,7 +990,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pdev->is_hotplug_bridge = 1;
}
-
/**
* pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
* @dev: PCI device
@@ -1225,13 +1222,13 @@ int pci_setup_device(struct pci_dev *dev)
break;
default: /* unknown header */
- dev_err(&dev->dev, "unknown header type %02x, "
- "ignoring device\n", dev->hdr_type);
+ dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
+ dev->hdr_type);
return -EIO;
bad:
- dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
- "type %02x)\n", dev->class, dev->hdr_type);
+ dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
+ dev->class, dev->hdr_type);
dev->class = PCI_CLASS_NOT_DEFINED;
}
@@ -1283,7 +1280,7 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
EXPORT_SYMBOL(pci_alloc_dev);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
- int crs_timeout)
+ int crs_timeout)
{
int delay = 1;
@@ -1306,10 +1303,9 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
return false;
/* Card hasn't responded in 60 seconds? Must be stuck. */
if (delay > crs_timeout) {
- printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
- "responding\n", pci_domain_nr(bus),
- bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
+ printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
return false;
}
}
@@ -1519,6 +1515,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
return nr;
}
+EXPORT_SYMBOL(pci_scan_slot);
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
@@ -1613,9 +1610,7 @@ static void pcie_write_mrrs(struct pci_dev *dev)
}
if (mrrs < 128)
- dev_err(&dev->dev, "MRRS was unable to be configured with a "
- "safe value. If problems are experienced, try running "
- "with pci=pcie_bus_safe.\n");
+ dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
}
static void pcie_bus_detect_mps(struct pci_dev *dev)
@@ -1652,8 +1647,8 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
pcie_write_mps(dev, mps);
pcie_write_mrrs(dev);
- dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
- "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
+ dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
+ pcie_get_mps(dev), 128 << dev->pcie_mpss,
orig_mps, pcie_get_readrq(dev));
return 0;
@@ -1716,7 +1711,7 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
bus->is_added = 1;
}
- for (pass=0; pass < 2; pass++)
+ for (pass = 0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev, max, pass);
@@ -1732,6 +1727,7 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
return max;
}
+EXPORT_SYMBOL_GPL(pci_scan_child_bus);
/**
* pcibios_root_bridge_prepare - Platform-specific host bridge setup.
@@ -2040,11 +2036,6 @@ unsigned int pci_rescan_bus(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
-EXPORT_SYMBOL(pci_add_new_bus);
-EXPORT_SYMBOL(pci_scan_slot);
-EXPORT_SYMBOL(pci_scan_bridge);
-EXPORT_SYMBOL_GPL(pci_scan_child_bus);
-
/*
* pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
* routines should always be executed under this mutex.
@@ -2063,7 +2054,8 @@ void pci_unlock_rescan_remove(void)
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
-static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
+static int __init pci_sort_bf_cmp(const struct device *d_a,
+ const struct device *d_b)
{
const struct pci_dev *a = to_pci_dev(d_a);
const struct pci_dev *b = to_pci_dev(d_b);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 46d1378f2e9e..3f155e78513f 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -17,15 +17,14 @@
static int proc_initialized; /* = 0 */
-static loff_t
-proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
+static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
return fixed_size_llseek(file, off, whence, dev->cfg_size);
}
-static ssize_t
-proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
unsigned int pos = *ppos;
@@ -108,8 +107,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
return nbytes;
}
-static ssize_t
-proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
struct inode *ino = file_inode(file);
struct pci_dev *dev = PDE_DATA(ino);
@@ -413,7 +412,7 @@ int pci_proc_detach_device(struct pci_dev *dev)
return 0;
}
-int pci_proc_detach_bus(struct pci_bus* bus)
+int pci_proc_detach_bus(struct pci_bus *bus)
{
proc_remove(bus->procdir);
return 0;
@@ -423,6 +422,7 @@ static int proc_bus_pci_dev_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_bus_pci_devices_op);
}
+
static const struct file_operations proc_bus_pci_dev_operations = {
.owner = THIS_MODULE,
.open = proc_bus_pci_dev_open,
@@ -443,6 +443,4 @@ static int __init pci_proc_init(void)
return 0;
}
-
device_initcall(pci_proc_init);
-
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 92e68c7747f7..d0f69269eb6c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -48,8 +48,8 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
{
dev->broken_parity_status = 1; /* This device gives false positives */
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
@@ -82,7 +82,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
- isa_dma_bridge_buggy=1;
+ isa_dma_bridge_buggy = 1;
dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
}
}
@@ -123,7 +123,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk
*/
static void quirk_nopcipci(struct pci_dev *dev)
{
- if ((pci_pci_problems & PCIPCI_FAIL)==0) {
+ if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_FAIL;
}
@@ -148,7 +148,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopci
*/
static void quirk_triton(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_TRITON)==0) {
+ if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_TRITON;
}
@@ -163,8 +163,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
* and http://www.georgebreese.com/net/software/#PCI
- * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
- * the info on which Mr Breese based his work.
+ * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
+ * the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
* information provided by VIA
@@ -177,14 +177,14 @@ static void quirk_vialatency(struct pci_dev *dev)
a buggy southbridge */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
- if (p!=NULL) {
+ if (p != NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
/* Check for buggy part revisions */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
- if (p==NULL) /* No problem parts */
+ if (p == NULL) /* No problem parts */
goto exit;
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
@@ -227,7 +227,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_viala
*/
static void quirk_viaetbf(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_VIAETBF)==0) {
+ if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VIAETBF;
}
@@ -236,7 +236,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_via
static void quirk_vsfx(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_VSFX)==0) {
+ if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VSFX;
}
@@ -251,7 +251,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
*/
static void quirk_alimagik(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
+ if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
@@ -265,7 +265,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagi
*/
static void quirk_natoma(struct pci_dev *dev)
{
- if ((pci_pci_problems&PCIPCI_NATOMA)==0) {
+ if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_NATOMA;
}
@@ -315,8 +315,7 @@ static void quirk_cs5536_vsa(struct pci_dev *dev)
if (pci_resource_len(dev, 0) != 8) {
struct resource *res = &dev->resource[0];
res->end = res->start + 8 - 1;
- dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
- "(incorrect header); workaround applied.\n");
+ dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
@@ -400,7 +399,8 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
* let's get enough confirmation reports first.
*/
base &= -size;
- dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
+ dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base,
+ base + size - 1);
}
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
@@ -425,7 +425,8 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
* reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
- dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
+ dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base,
+ base + size - 1);
}
/*
@@ -668,8 +669,7 @@ static void quirk_xio2000a(struct pci_dev *dev)
struct pci_dev *pdev;
u16 command;
- dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
- "secondary bus fast back-to-back transfers disabled\n");
+ dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
pci_read_config_word(pdev, PCI_COMMAND, &command);
if (command & PCI_COMMAND_FAST_BACK)
@@ -703,7 +703,7 @@ static void quirk_via_ioapic(struct pci_dev *dev)
tmp == 0 ? "Disa" : "Ena");
/* Offset 0x58: External APIC IRQ output control */
- pci_write_config_byte (dev, 0x58, tmp);
+ pci_write_config_byte(dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
@@ -761,8 +761,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
if (dev->subordinate && dev->revision <= 0x12) {
- dev_info(&dev->dev, "AMD8131 rev %x detected; "
- "disabling PCI-X MMRBC\n", dev->revision);
+ dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
+ dev->revision);
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
}
}
@@ -916,12 +916,12 @@ static void quirk_amd_ordering(struct pci_dev *dev)
{
u32 pcic;
pci_read_config_dword(dev, 0x4C, &pcic);
- if ((pcic&6)!=6) {
+ if ((pcic & 6) != 6) {
pcic |= 6;
dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
pci_write_config_dword(dev, 0x4C, pcic);
pci_read_config_dword(dev, 0x84, &pcic);
- pcic |= (1<<23); /* Required in this mode */
+ pcic |= (1 << 23); /* Required in this mode */
pci_write_config_dword(dev, 0x84, pcic);
}
}
@@ -937,7 +937,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C
*/
static void quirk_dunord(struct pci_dev *dev)
{
- struct resource *r = &dev->resource [1];
+ struct resource *r = &dev->resource[1];
r->flags |= IORESOURCE_UNSET;
r->start = 0;
@@ -967,11 +967,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge)
static void quirk_mediagx_master(struct pci_dev *dev)
{
u8 reg;
+
pci_read_config_byte(dev, 0x41, &reg);
if (reg & 2) {
reg &= ~2;
- dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg);
- pci_write_config_byte(dev, 0x41, reg);
+ dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
+ reg);
+ pci_write_config_byte(dev, 0x41, reg);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
@@ -1120,7 +1122,7 @@ static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x8025: /* P4B-LX */
case 0x8070: /* P4B */
case 0x8088: /* P4B533 */
@@ -1128,14 +1130,14 @@ static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x80b1: /* P4GE-V */
case 0x80b2: /* P4PE */
case 0x8093: /* P4B533-V */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x8030: /* P4T533 */
asus_hides_smbus = 1;
}
@@ -1175,7 +1177,7 @@ static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x088C: /* HP Compaq nc8000 */
case 0x0890: /* HP Compaq nc6000 */
asus_hides_smbus = 1;
@@ -1192,20 +1194,20 @@ static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
case 0x12bf: /* HP xw4100 */
asus_hides_smbus = 1;
}
- } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
- if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
- switch(dev->subsystem_device) {
- case 0xC00C: /* Samsung P35 notebook */
- asus_hides_smbus = 1;
- }
+ } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
+ if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+ switch (dev->subsystem_device) {
+ case 0xC00C: /* Samsung P35 notebook */
+ asus_hides_smbus = 1;
+ }
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x0058: /* Compaq Evo N620c */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs, therefore checking
@@ -1213,7 +1215,7 @@ static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
- switch(dev->subsystem_device) {
+ switch (dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
case 0x00ba: /* Compaq Evo D510 USDT */
@@ -1261,7 +1263,8 @@ static void asus_hides_smbus_lpc(struct pci_dev *dev)
pci_write_config_word(dev, 0xF2, val & (~0x8));
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8)
- dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val);
+ dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
+ val);
else
dev_info(&dev->dev, "Enabled i801 SMBus device\n");
}
@@ -1409,7 +1412,8 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
pci_write_config_byte(dev, 0x50, val & (~0xc0));
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0)
- dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val);
+ dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
+ val);
else
dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
}
@@ -1514,10 +1518,8 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
/* The next five BARs all seem to be rubbish, so just clean
* them out */
- for (i=1; i < 6; i++) {
+ for (i = 1; i < 6; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
- }
-
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
@@ -1552,7 +1554,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pci
* Some Intel PCI Express chipsets have trouble with downstream
* device power management.
*/
-static void quirk_intel_pcie_pm(struct pci_dev * dev)
+static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
pci_pm_d3_delay = 120;
dev->no_d1d2 = 1;
@@ -1721,8 +1723,8 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
if (!pci_config_word) {
- dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] "
- "already disabled\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n",
+ dev->vendor, dev->device);
return;
}
pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
@@ -1770,8 +1772,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev)
if (pci_resource_len(dev, bar) == 0x80 &&
(pci_resource_start(dev, bar) & 0x80)) {
struct resource *r = &dev->resource[bar];
- dev_info(&dev->dev,
- "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
+ dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
r->flags |= IORESOURCE_UNSET;
r->start = 0;
@@ -1818,9 +1819,7 @@ static void quirk_netmos(struct pci_dev *dev)
case PCI_DEVICE_ID_NETMOS_9845:
case PCI_DEVICE_ID_NETMOS_9855:
if (num_parallel) {
- dev_info(&dev->dev, "Netmos %04x (%u parallel, "
- "%u serial); changing class SERIAL to OTHER "
- "(use parport_serial)\n",
+ dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
dev->device, num_parallel, num_serial);
dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
(dev->class & 0xff);
@@ -1887,8 +1886,7 @@ static void quirk_e100_interrupt(struct pci_dev *dev)
cmd_hi = readb(csr + 3);
if (cmd_hi == 0) {
- dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; "
- "disabling\n");
+ dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n");
writeb(1, csr + 3);
}
@@ -1958,8 +1956,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
- dev_info(&dev->dev,
- "Linking AER extended capability\n");
+ dev_info(&dev->dev, "Linking AER extended capability\n");
}
}
}
@@ -1997,8 +1994,7 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
/* Turn off PCI Bus Parking */
pci_write_config_byte(dev, 0x76, b ^ 0x40);
- dev_info(&dev->dev,
- "Disabling VIA CX700 PCI parking\n");
+ dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n");
}
}
@@ -2013,8 +2009,7 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
/* Disable "Read FIFO Timer" */
pci_write_config_byte(dev, 0x77, 0x0);
- dev_info(&dev->dev,
- "Disabling VIA CX700 PCI caching\n");
+ dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n");
}
}
}
@@ -2149,8 +2144,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disab
static void quirk_disable_msi(struct pci_dev *dev)
{
if (dev->subordinate) {
- dev_warn(&dev->dev, "MSI quirk detected; "
- "subordinate MSI disabled\n");
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
@@ -2189,8 +2183,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
- &flags) == 0)
- {
+ &flags) == 0) {
dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
flags & HT_MSI_FLAGS_ENABLE ?
"enabled" : "disabled");
@@ -2207,8 +2200,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
- dev_warn(&dev->dev, "MSI quirk detected; "
- "subordinate MSI disabled\n");
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
@@ -2232,8 +2224,7 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
if (!pdev)
return;
if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
- dev_warn(&dev->dev, "MSI quirk detected; "
- "subordinate MSI disabled\n");
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
pci_dev_put(pdev);
@@ -2279,8 +2270,7 @@ static void nvenet_msi_disable(struct pci_dev *dev)
if (board_name &&
(strstr(board_name, "P5N32-SLI PREMIUM") ||
strstr(board_name, "P5N32-E SLI"))) {
- dev_info(&dev->dev,
- "Disabling msi for MCP55 NIC on P5N32-SLI\n");
+ dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
@@ -2489,8 +2479,7 @@ static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
*/
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (host_bridge == NULL) {
- dev_warn(&dev->dev,
- "nv_msi_ht_cap_quirk didn't locate host bridge\n");
+ dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
return;
}
@@ -2817,8 +2806,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
*/
err = pci_read_config_word(dev, 0x48, &rcc);
if (err) {
- dev_err(&dev->dev, "Error attempting to read the read "
- "completion coalescing register.\n");
+ dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n");
return;
}
@@ -2829,13 +2817,11 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
err = pci_write_config_word(dev, 0x48, rcc);
if (err) {
- dev_err(&dev->dev, "Error attempting to write the read "
- "completion coalescing register.\n");
+ dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n");
return;
}
- pr_info_once("Read completion coalescing disabled due to hardware "
- "errata relating to 256B MPS.\n");
+ pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
}
/* Intel 5000 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
@@ -2944,8 +2930,7 @@ static void disable_igfx_irq(struct pci_dev *dev)
/* Check if any interrupt line is still enabled */
if (readl(regs + I915_DEIER_REG) != 0) {
- dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; "
- "disabling\n");
+ dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
writel(0, regs + I915_DEIER_REG);
}
@@ -3040,7 +3025,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
struct pci_fixup *start, *end;
- switch(pass) {
+ switch (pass) {
case pci_fixup_early:
start = __start_pci_fixups_early;
end = __end_pci_fixups_early;
@@ -3112,8 +3097,8 @@ static int __init pci_apply_final_quirks(void)
if (!tmp || cls == tmp)
continue;
- printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), "
- "using %u bytes\n", cls << 2, tmp << 2,
+ printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
pci_dfl_cache_line_size << 2);
pci_cache_line_size = pci_dfl_cache_line_size;
}
@@ -3342,6 +3327,85 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
return -ENOTTY;
}
+static void quirk_dma_func0_alias(struct pci_dev *dev)
+{
+ if (PCI_FUNC(dev->devfn) != 0) {
+ dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ }
+}
+
+/*
+ * https://bugzilla.redhat.com/show_bug.cgi?id=605888
+ *
+ * Some Ricoh devices use function 0 as the PCIe requester ID for DMA.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+
+static void quirk_dma_func1_alias(struct pci_dev *dev)
+{
+ if (PCI_FUNC(dev->devfn) != 1) {
+ dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ }
+}
+
+/*
+ * Marvell 88SE9123 uses function 1 as the requester ID for DMA. In some
+ * SKUs function 1 is present and is a legacy IDE controller; in other
+ * SKUs this function is not present, making it a ghost requester.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=42679
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
+ quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
+ quirk_dma_func1_alias);
+/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
+ PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+ quirk_dma_func1_alias);
+
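/*
 * Illustrative note, not part of this patch: the dma_alias_devfn set by
 * the quirks above is consumed by pci_for_each_dma_alias(), added to
 * drivers/pci/search.c later in this diff, which invokes its callback once
 * for the device's own requester ID and once for the quirked alias.
 */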
+/*
+ * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
+ * using the wrong DMA alias for the device. Some of these devices can be
+ * used as either forward or reverse bridges, so we need to test whether the
+ * device is operating in the correct mode. We could probably apply this
+ * quirk to PCI_ANY_ID, but for now we'll just use known offenders. The test:
+ * if @pdev is a non-root, non-PCIe bridge whose upstream device is PCIe and
+ * is not itself a PCIe-to-PCI bridge, then @pdev is actually a PCIe-to-PCI
+ * bridge.
+ */
+static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
+{
+ if (!pci_is_root_bus(pdev->bus) &&
+ pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
+ pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
+ pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
+}
+/* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
+ quirk_use_pcie_bridge_dma_alias);
+/* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */
+DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
+/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
+DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+
static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
{
if (!PCI_FUNC(dev->devfn))
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index c1839450d4d6..f955edb9bea7 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -38,6 +38,7 @@ int pci_enable_rom(struct pci_dev *pdev)
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_rom);
/**
* pci_disable_rom - disable ROM decoding for a PCI device
@@ -53,6 +54,7 @@ void pci_disable_rom(struct pci_dev *pdev)
rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
}
+EXPORT_SYMBOL_GPL(pci_disable_rom);
/**
* pci_get_rom_size - obtain the actual size of the ROM image
@@ -135,7 +137,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
} else {
/* assign the ROM an address if it doesn't have one */
if (res->parent == NULL &&
- pci_assign_resource(pdev,PCI_ROM_RESOURCE))
+ pci_assign_resource(pdev, PCI_ROM_RESOURCE))
return NULL;
start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
@@ -166,6 +168,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
*size = pci_get_rom_size(pdev, rom, *size);
return rom;
}
+EXPORT_SYMBOL(pci_map_rom);
/**
* pci_unmap_rom - unmap the ROM from kernel space
@@ -187,6 +190,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
pci_disable_rom(pdev);
}
+EXPORT_SYMBOL(pci_unmap_rom);
/**
* pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
@@ -199,7 +203,7 @@ void pci_cleanup_rom(struct pci_dev *pdev)
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
if (res->flags & IORESOURCE_ROM_COPY) {
- kfree((void*)(unsigned long)res->start);
+ kfree((void *)(unsigned long)res->start);
res->flags |= IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_ROM_COPY;
res->start = 0;
@@ -222,9 +226,4 @@ void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
return NULL;
}
-
-EXPORT_SYMBOL(pci_map_rom);
-EXPORT_SYMBOL(pci_unmap_rom);
-EXPORT_SYMBOL_GPL(pci_enable_rom);
-EXPORT_SYMBOL_GPL(pci_disable_rom);
EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 8e495bda678f..827ad831f1dd 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -17,14 +17,100 @@ DECLARE_RWSEM(pci_bus_sem);
EXPORT_SYMBOL_GPL(pci_bus_sem);
/*
+ * pci_for_each_dma_alias - Iterate over DMA aliases for a device
+ * @pdev: starting downstream device
+ * @fn: function to call for each alias
+ * @data: opaque data to pass to @fn
+ *
+ * Starting from @pdev, walk up the bus, calling @fn for each possible alias
+ * of @pdev at the root bus.
+ */
+int pci_for_each_dma_alias(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *pdev,
+ u16 alias, void *data), void *data)
+{
+ struct pci_bus *bus;
+ int ret;
+
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+ if (ret)
+ return ret;
+
+ /*
+ * If the device is broken and uses an alias requester ID for
+ * DMA, iterate over that too.
+ */
+ if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number,
+ pdev->dma_alias_devfn), data);
+ if (ret)
+ return ret;
+ }
+
+ for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
+ struct pci_dev *tmp;
+
+ /* Skip virtual buses */
+ if (!bus->self)
+ continue;
+
+ tmp = bus->self;
+
+ /*
+ * PCIe-to-PCI/X bridges alias transactions from downstream
+ * devices using the subordinate bus number (PCI Express to
+ * PCI/PCI-X Bridge Spec, rev 1.0, sec 2.3). For all cases
+ * where the upstream bus is PCI/X we alias to the bridge
+ * (there are various conditions in the previous reference
+ * where the bridge may take ownership of transactions, even
+ * when the secondary interface is PCI-X).
+ */
+ if (pci_is_pcie(tmp)) {
+ switch (pci_pcie_type(tmp)) {
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ continue;
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ ret = fn(tmp,
+ PCI_DEVID(tmp->subordinate->number,
+ PCI_DEVFN(0, 0)), data);
+ if (ret)
+ return ret;
+ continue;
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ ret = fn(tmp,
+ PCI_DEVID(tmp->bus->number,
+ tmp->devfn), data);
+ if (ret)
+ return ret;
+ continue;
+ }
+ } else {
+ if (tmp->dev_flags & PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS)
+ ret = fn(tmp,
+ PCI_DEVID(tmp->subordinate->number,
+ PCI_DEVFN(0, 0)), data);
+ else
+ ret = fn(tmp,
+ PCI_DEVID(tmp->bus->number,
+ tmp->devfn), data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
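/*
 * Illustrative sketch, not part of this patch: an IOMMU driver might walk
 * the aliases like this.  The callback and call site are hypothetical.
 */
static int demo_report_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	dev_info(&pdev->dev, "may be seen as requester %02x:%02x.%d\n",
		 alias >> 8, PCI_SLOT(alias & 0xff), PCI_FUNC(alias & 0xff));
	return 0;	/* a non-zero return stops the walk */
}

/* usage: pci_for_each_dma_alias(pdev, demo_report_alias, NULL); */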
+/*
* find the upstream PCIe-to-PCI bridge of a PCI device
* if the device is PCIe, return NULL
* if the device isn't connected to a PCIe bridge (that is, its parent is a
* legacy PCI bridge and the bridge is directly connected to bus 0), return its
* parent
*/
-struct pci_dev *
-pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
+struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
{
struct pci_dev *tmp = NULL;
@@ -56,12 +142,12 @@ static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
struct pci_bus *child;
struct pci_bus *tmp;
- if(bus->number == busnr)
+ if (bus->number == busnr)
return bus;
list_for_each_entry(tmp, &bus->children, node) {
child = pci_do_find_bus(tmp, busnr);
- if(child)
+ if (child)
return child;
}
return NULL;
@@ -76,7 +162,7 @@ static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
* in the global list of PCI buses. If the bus is found, a pointer to its
* data structure is returned. If no bus is found, %NULL is returned.
*/
-struct pci_bus * pci_find_bus(int domain, int busnr)
+struct pci_bus *pci_find_bus(int domain, int busnr)
{
struct pci_bus *bus = NULL;
struct pci_bus *tmp_bus;
@@ -90,6 +176,7 @@ struct pci_bus * pci_find_bus(int domain, int busnr)
}
return NULL;
}
+EXPORT_SYMBOL(pci_find_bus);
/**
* pci_find_next_bus - begin or continue searching for a PCI bus
@@ -100,8 +187,7 @@ struct pci_bus * pci_find_bus(int domain, int busnr)
* @from is not %NULL, searches continue from next device on the
* global list.
*/
-struct pci_bus *
-pci_find_next_bus(const struct pci_bus *from)
+struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{
struct list_head *n;
struct pci_bus *b = NULL;
@@ -114,6 +200,7 @@ pci_find_next_bus(const struct pci_bus *from)
up_read(&pci_bus_sem);
return b;
}
+EXPORT_SYMBOL(pci_find_next_bus);
/**
* pci_get_slot - locate PCI device for a given PCI slot
@@ -147,6 +234,7 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn)
up_read(&pci_bus_sem);
return dev;
}
+EXPORT_SYMBOL(pci_get_slot);
/**
* pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
@@ -251,6 +339,7 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
return pci_get_dev_by_id(&id, from);
}
+EXPORT_SYMBOL(pci_get_subsys);
/**
* pci_get_device - begin or continue searching for a PCI device by vendor/device id
@@ -266,11 +355,12 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
* from next device on the global list. The reference count for @from is
* always decremented if it is not %NULL.
*/
-struct pci_dev *
-pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
+struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from)
{
return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
}
+EXPORT_SYMBOL(pci_get_device);
/**
* pci_get_class - begin or continue searching for a PCI device by class
@@ -299,6 +389,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
return pci_get_dev_by_id(&id, from);
}
+EXPORT_SYMBOL(pci_get_class);
/**
* pci_dev_present - Returns 1 if a device matching the device list is present, 0 if not.
@@ -328,12 +419,3 @@ int pci_dev_present(const struct pci_device_id *ids)
return 0;
}
EXPORT_SYMBOL(pci_dev_present);
-
-/* For boot time work */
-EXPORT_SYMBOL(pci_find_bus);
-EXPORT_SYMBOL(pci_find_next_bus);
-/* For everyone */
-EXPORT_SYMBOL(pci_get_device);
-EXPORT_SYMBOL(pci_get_subsys);
-EXPORT_SYMBOL(pci_get_slot);
-EXPORT_SYMBOL(pci_get_class);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index fd9b545c3cf5..a5a63ecfb628 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -68,7 +68,7 @@ static int add_to_list(struct list_head *head,
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
- pr_warning("add_to_list: kmalloc() failed!\n");
+ pr_warn("add_to_list: kmalloc() failed!\n");
return -ENOMEM;
}
@@ -148,8 +148,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- panic("pdev_sort_resources(): "
- "kmalloc() failed!\n");
+ panic("pdev_sort_resources(): kmalloc() failed!\n");
tmp->res = r;
tmp->dev = dev;
@@ -736,7 +735,7 @@ static resource_size_t calculate_iosize(resource_size_t size,
{
if (size < min_size)
size = min_size;
- if (old_size == 1 )
+ if (old_size == 1)
old_size = 0;
/* To be fixed in 2.5: we should have sort of HAVE_ISA
flag in the struct pci_bus. */
@@ -757,7 +756,7 @@ static resource_size_t calculate_memsize(resource_size_t size,
{
if (size < min_size)
size = min_size;
- if (old_size == 1 )
+ if (old_size == 1)
old_size = 0;
if (size < old_size)
size = old_size;
@@ -859,9 +858,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
- dev_info(&bus->self->dev, "disabling bridge window "
- "%pR to %pR (unused)\n", b_res,
- &bus->busn_res);
+ dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
+ b_res, &bus->busn_res);
b_res->flags = 0;
return;
}
@@ -872,10 +870,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
- dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
- "%pR to %pR add_size %llx\n", b_res,
- &bus->busn_res,
- (unsigned long long)size1-size0);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long)size1-size0);
}
}
@@ -974,9 +971,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (order < 0)
order = 0;
if (order >= ARRAY_SIZE(aligns)) {
- dev_warn(&dev->dev, "disabling BAR %d: %pR "
- "(bad alignment %#llx)\n", i, r,
- (unsigned long long) align);
+ dev_warn(&dev->dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
+ i, r, (unsigned long long) align);
r->flags = 0;
continue;
}
@@ -1003,9 +999,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
- dev_info(&bus->self->dev, "disabling bridge window "
- "%pR to %pR (unused)\n", b_res,
- &bus->busn_res);
+ dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
+ b_res, &bus->busn_res);
b_res->flags = 0;
return 0;
}
@@ -1014,9 +1009,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->flags |= IORESOURCE_STARTALIGN;
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
- dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
- "%pR to %pR add_size %llx\n", b_res,
- &bus->busn_res, (unsigned long long)size1-size0);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long)size1-size0);
}
return 0;
}
@@ -1274,8 +1269,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
break;
default:
- dev_info(&dev->dev, "not setting up bridge for bus "
- "%04x:%02x\n", pci_domain_nr(b), b->number);
+ dev_info(&dev->dev, "not setting up bridge for bus %04x:%02x\n",
+ pci_domain_nr(b), b->number);
break;
}
}
@@ -1312,8 +1307,8 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
break;
default:
- dev_info(&bridge->dev, "not setting up bridge for bus "
- "%04x:%02x\n", pci_domain_nr(b), b->number);
+ dev_info(&bridge->dev, "not setting up bridge for bus %04x:%02x\n",
+ pci_domain_nr(b), b->number);
break;
}
}
@@ -1430,10 +1425,10 @@ static void pci_bus_dump_res(struct pci_bus *bus)
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->end || !res->flags)
- continue;
+ continue;
dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
- }
+ }
}
static void pci_bus_dump_resources(struct pci_bus *bus)
@@ -1458,7 +1453,7 @@ static int pci_bus_get_depth(struct pci_bus *bus)
int depth = 0;
struct pci_bus *child_bus;
- list_for_each_entry(child_bus, &bus->children, node){
+ list_for_each_entry(child_bus, &bus->children, node) {
int ret;
ret = pci_bus_get_depth(child_bus);
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index dbc4ffcf42de..4e2d595d50ca 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -22,10 +22,9 @@ void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
-static void
-pdev_fixup_irq(struct pci_dev *dev,
- u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(const struct pci_dev *, u8, u8))
+static void pdev_fixup_irq(struct pci_dev *dev,
+ u8 (*swizzle)(struct pci_dev *, u8 *),
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
u8 pin, slot;
int irq = 0;
@@ -58,11 +57,11 @@ pdev_fixup_irq(struct pci_dev *dev,
pcibios_update_irq(dev, irq);
}
-void
-pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(const struct pci_dev *, u8, u8))
+void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
struct pci_dev *dev = NULL;
+
for_each_pci_dev(dev)
pdev_fixup_irq(dev, swizzle, map_irq);
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 33f9e32d94d0..caed1ce6facd 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -96,8 +96,8 @@ void pci_update_resource(struct pci_dev *dev, int resno)
pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check);
if (check != new) {
- dev_err(&dev->dev, "BAR %d: error updating "
- "(high %#08x != %#08x)\n", resno, new, check);
+ dev_err(&dev->dev, "BAR %d: error updating (high %#08x != %#08x)\n",
+ resno, new, check);
}
}
@@ -289,8 +289,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
res->flags |= IORESOURCE_UNSET;
align = pci_resource_alignment(dev, res);
if (!align) {
- dev_info(&dev->dev, "BAR %d: can't assign %pR "
- "(bogus alignment)\n", resno, res);
+ dev_info(&dev->dev, "BAR %d: can't assign %pR (bogus alignment)\n",
+ resno, res);
return -EINVAL;
}
@@ -314,6 +314,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
return ret;
}
+EXPORT_SYMBOL(pci_assign_resource);
int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
resource_size_t min_align)
@@ -324,8 +325,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
res->flags |= IORESOURCE_UNSET;
if (!res->parent) {
- dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR "
- "\n", resno, res);
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+ resno, res);
return -EINVAL;
}
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 24750a1b39b6..b91c4da68365 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (!dev)
return -ENODEV;
- switch(len) {
+ switch (len) {
case 1:
err = get_user(byte, (u8 __user *)buf);
if (err)
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 16a2f067c242..64b98d242ea6 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA
config PHY_SUN4I_USB
tristate "Allwinner sunxi SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on RESET_CONTROLLER
select GENERIC_PHY
help
Enable this to support the transceiver that is part of Allwinner
@@ -122,6 +123,7 @@ config PHY_SUN4I_USB
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
select MFD_SYSCON
help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index c64a2f3b2d62..49c446530101 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
return phy;
put_dev:
- put_device(&phy->dev);
- ida_remove(&phy_ida, phy->id);
+ put_device(&phy->dev); /* calls phy_release() which frees resources */
+ return ERR_PTR(ret);
+
free_phy:
kfree(phy);
return ERR_PTR(ret);
@@ -799,7 +800,7 @@ static void phy_release(struct device *dev)
phy = to_phy(dev);
dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
- ida_remove(&phy_ida, phy->id);
+ ida_simple_remove(&phy_ida, phy->id);
kfree(phy);
}
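
The phy-core hunk above is a common refcounting fix worth spelling out: once device_initialize() has run, the object is refcounted and put_device() is the only valid teardown, because the release callback (phy_release(), which now pairs ida_simple_get() with ida_simple_remove()) already frees the id and the memory; removing the id again on the error path was a double release. A minimal sketch of the pattern, with illustrative foo_* names that are not part of the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(foo_ida);

struct foo {
	int id;
	struct device dev;
};

static void foo_release(struct device *dev)
{
	struct foo *f = container_of(dev, struct foo, dev);

	ida_simple_remove(&foo_ida, f->id);	/* the release callback owns the id */
	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f;
	int ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&foo_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_foo;		/* no struct device yet: plain kfree() */
	f->id = ret;

	device_initialize(&f->dev);
	f->dev.release = foo_release;

	ret = device_add(&f->dev);
	if (ret)
		goto put_dev;		/* refcounted now: only put_device() */
	return f;

put_dev:
	put_device(&f->dev);		/* ends up in foo_release() */
	return ERR_PTR(ret);

free_foo:
	kfree(f);
	return ERR_PTR(ret);
}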
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 7007c11fe07d..34b396146c8a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev)
if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
- if (!phy->phy_base)
- return -ENOMEM;
+ if (IS_ERR(phy->phy_base))
+ return PTR_ERR(phy->phy_base);
phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
}
@@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
otg->phy = &phy->phy;
platform_set_drvdata(pdev, phy);
- pm_runtime_enable(phy->dev);
generic_phy = devm_phy_create(phy->dev, &ops, NULL);
if (IS_ERR(generic_phy))
@@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(generic_phy, phy);
+ pm_runtime_enable(phy->dev);
phy_provider = devm_of_phy_provider_register(phy->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
+ if (IS_ERR(phy_provider)) {
+ pm_runtime_disable(phy->dev);
return PTR_ERR(phy_provider);
+ }
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
@@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
if (!IS_ERR(phy->optclk))
clk_unprepare(phy->optclk);
usb_remove_phy(&phy->phy);
+ pm_runtime_disable(phy->dev);
return 0;
}
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 8a8c6bc8709a..1e69a32c221d 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
#endif
{ },
};
+MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
static int samsung_usb2_phy_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index edf5d2fd2b22..86db2235ab00 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
regmap = dev_get_regmap(&pdev->dev, NULL);
if (!regmap)
- return PTR_ERR(regmap);
+ return -ENODEV;
pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
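
The omap-usb2 and berlin hunks above untangle two different error conventions: devm_ioremap_resource() returns an ERR_PTR() and is never NULL on failure, so it must be tested with IS_ERR(), while dev_get_regmap() returns NULL on failure, so the old PTR_ERR(regmap) evaluated to 0 and reported success. A minimal probe sketch of both conventions, with illustrative foo_* naming:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct regmap *regmap;

	/* ERR_PTR convention: never NULL on failure, test with IS_ERR() */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* NULL convention: PTR_ERR(NULL) would be 0, i.e. "success" */
	regmap = dev_get_regmap(&pdev->dev, NULL);
	if (!regmap)
		return -ENODEV;

	return 0;
}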
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f1ca75e6d7b1..5f38c7f67834 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
configlen++;
pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+ if (!pinconfig) {
+ kfree(*map);
+ return -ENOMEM;
+ }
if (!of_property_read_u32(node, "allwinner,drive", &val)) {
u16 strength = (val + 1) * 10;
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 7f3aad0e115c..7f1a2e2711bd 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -84,21 +84,19 @@ static struct i2c_board_info tsl2563_als_device = {
I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
};
+static int mxt_t19_keys[] = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
+};
+
static struct mxt_platform_data atmel_224s_tp_platform_data = {
- .x_line = 18,
- .y_line = 12,
- .x_size = 102*20,
- .y_size = 68*20,
- .blen = 0x80, /* Gain setting is in upper 4 bits */
- .threshold = 0x32,
- .voltage = 0, /* 3.3V */
- .orient = MXT_VERTICAL_FLIP,
.irqflags = IRQF_TRIGGER_FALLING,
- .is_tp = true,
- .key_map = { KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- BTN_LEFT },
+ .t19_num_keys = ARRAY_SIZE(mxt_t19_keys),
+ .t19_keymap = mxt_t19_keys,
.config = NULL,
.config_length = 0,
};
@@ -110,16 +108,7 @@ static struct i2c_board_info atmel_224s_tp_device = {
};
static struct mxt_platform_data atmel_1664s_platform_data = {
- .x_line = 32,
- .y_line = 50,
- .x_size = 1700,
- .y_size = 2560,
- .blen = 0x89, /* Gain setting is in upper 4 bits */
- .threshold = 0x28,
- .voltage = 0, /* 3.3V */
- .orient = MXT_ROTATED_90_COUNTER,
.irqflags = IRQF_TRIGGER_FALLING,
- .is_tp = false,
.config = NULL,
.config_length = 0,
};
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 27df2c533b09..172f26ce59ac 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -102,7 +102,7 @@ config DELL_LAPTOP
default n
---help---
This driver adds support for rfkill and backlight control to Dell
- laptops.
+ laptops (except for some models covered by the Compal driver).
config DELL_WMI
tristate "Dell WMI extras"
@@ -127,6 +127,16 @@ config DELL_WMI_AIO
To compile this driver as a module, choose M here: the module will
be called dell-wmi-aio.
+config DELL_SMO8800
+ tristate "Dell Latitude freefall driver (ACPI SMO8800/SMO8810)"
+ depends on ACPI
+ ---help---
+ Say Y here if you want to support the SMO8800/SMO8810 freefall device
+ on Dell Latitude laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-smo8800.
+
config FUJITSU_LAPTOP
tristate "Fujitsu Laptop Extras"
@@ -265,23 +275,21 @@ config PANASONIC_LAPTOP
R2, R3, R5, T2, W2 and Y2 series), say Y.
config COMPAL_LAPTOP
- tristate "Compal Laptop Extras"
+ tristate "Compal (and others) Laptop Extras"
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
depends on RFKILL
depends on HWMON
depends on POWER_SUPPLY
---help---
- This is a driver for laptops built by Compal:
-
- Compal FL90/IFL90
- Compal FL91/IFL91
- Compal FL92/JFL92
- Compal FT00/IFT00
+ This is a driver for laptops built by Compal, and some models by
+ other brands (e.g. Dell, Toshiba).
- It adds support for Bluetooth, WLAN and LCD brightness control.
+ It adds support for rfkill, Bluetooth, WLAN and LCD brightness
+ control.
- If you have an Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
+ For a (possibly incomplete) list of supported laptops, please refer
+ to: Documentation/platform/x86-laptop-drivers.txt
config SONY_LAPTOP
tristate "Sony Laptop Extras"
@@ -724,7 +732,7 @@ config IBM_RTL
config XO1_RFKILL
tristate "OLPC XO-1 software RF kill switch"
- depends on OLPC
+ depends on OLPC || COMPILE_TEST
depends on RFKILL
---help---
Support for enabling/disabling the WLAN interface on the OLPC XO-1
@@ -732,6 +740,7 @@ config XO1_RFKILL
config XO15_EBOOK
tristate "OLPC XO-1.5 ebook switch"
+ depends on OLPC || COMPILE_TEST
depends on ACPI && INPUT
---help---
Support for the ebook switch on the OLPC XO-1.5 laptop.
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 1a2eafc9d48e..c4ca428fd3db 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
+obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 541f9514f76f..297b6640213f 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -32,6 +32,7 @@
#define WMAX_METHOD_HDMI_STATUS 0x2
#define WMAX_METHOD_BRIGHTNESS 0x3
#define WMAX_METHOD_ZONE_CONTROL 0x4
+#define WMAX_METHOD_HDMI_CABLE 0x5
MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
MODULE_DESCRIPTION("Alienware special feature control");
@@ -350,12 +351,11 @@ static int alienware_zone_init(struct platform_device *dev)
char *name;
if (interface == WMAX) {
- global_led.max_brightness = 100;
lighting_control_state = WMAX_RUNNING;
} else if (interface == LEGACY) {
- global_led.max_brightness = 0x0F;
lighting_control_state = LEGACY_RUNNING;
}
+ global_led.max_brightness = 0x0F;
global_brightness = global_led.max_brightness;
/*
@@ -423,41 +423,85 @@ static void alienware_zone_exit(struct platform_device *dev)
The HDMI mux sysfs node indicates the status of the HDMI input mux.
It can toggle between standard system GPU output and HDMI input.
*/
-static ssize_t show_hdmi(struct device *dev, struct device_attribute *attr,
- char *buf)
+static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
+ u32 command, int *out_data)
{
acpi_status status;
- struct acpi_buffer input;
union acpi_object *obj;
- u32 tmp = 0;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input;
+ struct acpi_buffer output;
+
+ input.length = (acpi_size) sizeof(*in_args);
+ input.pointer = in_args;
+ if (out_data != NULL) {
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ command, &input, &output);
+ } else
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ command, &input, NULL);
+
+ if (ACPI_SUCCESS(status) && out_data != NULL) {
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32) obj->integer.value;
+ }
+ return status;
+
+}
+
+static ssize_t show_hdmi_cable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
struct hdmi_args in_args = {
.arg = 0,
};
- input.length = (acpi_size) sizeof(in_args);
- input.pointer = &in_args;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
- WMAX_METHOD_HDMI_STATUS, &input, &output);
+ status =
+ alienware_hdmi_command(&in_args, WMAX_METHOD_HDMI_CABLE,
+ (u32 *) &out_data);
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 0)
+ return scnprintf(buf, PAGE_SIZE,
+ "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "unconnected [connected] unknown\n");
+ }
+ pr_err("alienware-wmi: unknown HDMI cable status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "unconnected connected [unknown]\n");
+}
+
+static ssize_t show_hdmi_source(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct hdmi_args in_args = {
+ .arg = 0,
+ };
+ status =
+ alienware_hdmi_command(&in_args, WMAX_METHOD_HDMI_STATUS,
+ (u32 *) &out_data);
if (ACPI_SUCCESS(status)) {
- obj = (union acpi_object *)output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- tmp = (u32) obj->integer.value;
- if (tmp == 1)
+ if (out_data == 1)
return scnprintf(buf, PAGE_SIZE,
"[input] gpu unknown\n");
- else if (tmp == 2)
+ else if (out_data == 2)
return scnprintf(buf, PAGE_SIZE,
"input [gpu] unknown\n");
}
- pr_err("alienware-wmi: unknown HDMI status: %d\n", status);
+ pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
}
-static ssize_t toggle_hdmi(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t toggle_hdmi_source(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct acpi_buffer input;
acpi_status status;
struct hdmi_args args;
if (strcmp(buf, "gpu\n") == 0)
@@ -467,33 +511,46 @@ static ssize_t toggle_hdmi(struct device *dev, struct device_attribute *attr,
else
args.arg = 3;
pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
- input.length = (acpi_size) sizeof(args);
- input.pointer = &args;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
- WMAX_METHOD_HDMI_SOURCE, &input, NULL);
+
+ status = alienware_hdmi_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
+
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
status);
return count;
}
-static DEVICE_ATTR(hdmi, S_IRUGO | S_IWUSR, show_hdmi, toggle_hdmi);
+static DEVICE_ATTR(cable, S_IRUGO, show_hdmi_cable, NULL);
+static DEVICE_ATTR(source, S_IRUGO | S_IWUSR, show_hdmi_source,
+ toggle_hdmi_source);
+
+static struct attribute *hdmi_attrs[] = {
+ &dev_attr_cable.attr,
+ &dev_attr_source.attr,
+ NULL,
+};
-static void remove_hdmi(struct platform_device *device)
+static struct attribute_group hdmi_attribute_group = {
+ .name = "hdmi",
+ .attrs = hdmi_attrs,
+};
+
+static void remove_hdmi(struct platform_device *dev)
{
- device_remove_file(&device->dev, &dev_attr_hdmi);
+ sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group);
}
-static int create_hdmi(void)
+static int create_hdmi(struct platform_device *dev)
{
- int ret = -ENOMEM;
- ret = device_create_file(&platform_device->dev, &dev_attr_hdmi);
+ int ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
if (ret)
goto error_create_hdmi;
return 0;
error_create_hdmi:
- remove_hdmi(platform_device);
+ remove_hdmi(dev);
return ret;
}
@@ -527,7 +584,7 @@ static int __init alienware_wmi_init(void)
goto fail_platform_device2;
if (interface == WMAX) {
- ret = create_hdmi();
+ ret = create_hdmi(platform_device);
if (ret)
goto fail_prep_hdmi;
}
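
With the refactor above, the single "hdmi" attribute becomes an attribute group, so userspace sees an hdmi/ directory containing a read-only cable file and a read-write source file. A hedged userspace reader; the sysfs path below is an assumption for illustration and depends on how the platform device is registered:

#include <stdio.h>

/* Path is an assumption for illustration; the actual sysfs location
 * depends on where the alienware-wmi platform device lands. */
#define HDMI_CABLE "/sys/devices/platform/alienware-wmi/hdmi/cable"

int main(void)
{
	char buf[64];
	FILE *f = fopen(HDMI_CABLE, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cable: %s", buf);	/* e.g. "[unconnected] connected unknown" */
	fclose(f);
	return 0;
}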
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 563f59efa669..ddf0eefd862c 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -137,6 +137,15 @@ static struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550CA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550CA"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X55A",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 91ef69a52263..3c6ccedc82b6 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -266,7 +266,7 @@ static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
union acpi_object *obj;
- u32 tmp;
+ u32 tmp = 0;
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id,
&input, &output);
@@ -277,8 +277,6 @@ static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
tmp = (u32) obj->integer.value;
- else
- tmp = 0;
if (retval)
*retval = tmp;
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
new file mode 100644
index 000000000000..a653716055d1
--- /dev/null
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -0,0 +1,233 @@
+/*
+ * dell-smo8800.c - Dell Latitude ACPI SMO8800/SMO8810 freefall sensor driver
+ *
+ * Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This is loosely based on lis3lv02d driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "smo8800"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+
+struct smo8800_device {
+ u32 irq; /* acpi device irq */
+ atomic_t counter; /* count after last read */
+ struct miscdevice miscdev; /* for /dev/freefall */
+ unsigned long misc_opened; /* whether the device is open */
+ wait_queue_head_t misc_wait; /* Wait queue for the misc dev */
+ struct device *dev; /* acpi device */
+};
+
+static irqreturn_t smo8800_interrupt_quick(int irq, void *data)
+{
+ struct smo8800_device *smo8800 = data;
+
+ atomic_inc(&smo8800->counter);
+ wake_up_interruptible(&smo8800->misc_wait);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t smo8800_interrupt_thread(int irq, void *data)
+{
+ struct smo8800_device *smo8800 = data;
+
+ dev_info(smo8800->dev, "detected free fall\n");
+ return IRQ_HANDLED;
+}
+
+static acpi_status smo8800_get_resource(struct acpi_resource *resource,
+ void *context)
+{
+ struct acpi_resource_extended_irq *irq;
+
+ if (resource->type != ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
+ return AE_OK;
+
+ irq = &resource->data.extended_irq;
+ if (!irq || !irq->interrupt_count)
+ return AE_OK;
+
+ *((u32 *)context) = irq->interrupts[0];
+ return AE_CTRL_TERMINATE;
+}
+
+static u32 smo8800_get_irq(struct acpi_device *device)
+{
+ u32 irq = 0;
+ acpi_status status;
+
+ status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ smo8800_get_resource, &irq);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "acpi_walk_resources failed\n");
+ return 0;
+ }
+
+ return irq;
+}
+
+static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct smo8800_device *smo8800 = container_of(file->private_data,
+ struct smo8800_device, miscdev);
+
+ u32 data = 0;
+ unsigned char byte_data = 0;
+ ssize_t retval = 1;
+
+ if (count < 1)
+ return -EINVAL;
+
+ atomic_set(&smo8800->counter, 0);
+ retval = wait_event_interruptible(smo8800->misc_wait,
+ (data = atomic_xchg(&smo8800->counter, 0)));
+
+ if (retval)
+ return retval;
+
+ byte_data = 1;
+ retval = 1;
+
+ if (data < 255)
+ byte_data = data;
+ else
+ byte_data = 255;
+
+ if (put_user(byte_data, buf))
+ retval = -EFAULT;
+
+ return retval;
+}
+
+static int smo8800_misc_open(struct inode *inode, struct file *file)
+{
+ struct smo8800_device *smo8800 = container_of(file->private_data,
+ struct smo8800_device, miscdev);
+
+ if (test_and_set_bit(0, &smo8800->misc_opened))
+ return -EBUSY; /* already open */
+
+ atomic_set(&smo8800->counter, 0);
+ return 0;
+}
+
+static int smo8800_misc_release(struct inode *inode, struct file *file)
+{
+ struct smo8800_device *smo8800 = container_of(file->private_data,
+ struct smo8800_device, miscdev);
+
+ clear_bit(0, &smo8800->misc_opened); /* release the device */
+ return 0;
+}
+
+static const struct file_operations smo8800_misc_fops = {
+ .owner = THIS_MODULE,
+ .read = smo8800_misc_read,
+ .open = smo8800_misc_open,
+ .release = smo8800_misc_release,
+};
+
+static int smo8800_add(struct acpi_device *device)
+{
+ int err;
+ struct smo8800_device *smo8800;
+
+ smo8800 = devm_kzalloc(&device->dev, sizeof(*smo8800), GFP_KERNEL);
+ if (!smo8800) {
+ dev_err(&device->dev, "failed to allocate device data\n");
+ return -ENOMEM;
+ }
+
+ smo8800->dev = &device->dev;
+ smo8800->miscdev.minor = MISC_DYNAMIC_MINOR;
+ smo8800->miscdev.name = "freefall";
+ smo8800->miscdev.fops = &smo8800_misc_fops;
+
+ init_waitqueue_head(&smo8800->misc_wait);
+
+ err = misc_register(&smo8800->miscdev);
+ if (err) {
+ dev_err(&device->dev, "failed to register misc dev: %d\n", err);
+ return err;
+ }
+
+ device->driver_data = smo8800;
+
+ smo8800->irq = smo8800_get_irq(device);
+ if (!smo8800->irq) {
+ dev_err(&device->dev, "failed to obtain IRQ\n");
+ err = -EINVAL;
+ goto error;
+ }
+
+ err = request_threaded_irq(smo8800->irq, smo8800_interrupt_quick,
+ smo8800_interrupt_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, smo8800);
+ if (err) {
+ dev_err(&device->dev,
+ "failed to request thread for IRQ %d: %d\n",
+ smo8800->irq, err);
+ goto error;
+ }
+
+ dev_dbg(&device->dev, "device /dev/freefall registered with IRQ %d\n",
+ smo8800->irq);
+ return 0;
+
+error:
+ misc_deregister(&smo8800->miscdev);
+ return err;
+}
+
+static int smo8800_remove(struct acpi_device *device)
+{
+ struct smo8800_device *smo8800 = device->driver_data;
+
+ free_irq(smo8800->irq, smo8800);
+ misc_deregister(&smo8800->miscdev);
+ dev_dbg(&device->dev, "device /dev/freefall unregistered\n");
+ return 0;
+}
+
+static const struct acpi_device_id smo8800_ids[] = {
+ { "SMO8800", 0 },
+ { "SMO8810", 0 },
+ { "", 0 },
+};
+
+MODULE_DEVICE_TABLE(acpi, smo8800_ids);
+
+static struct acpi_driver smo8800_driver = {
+ .name = DRIVER_NAME,
+ .class = "Latitude",
+ .ids = smo8800_ids,
+ .ops = {
+ .add = smo8800_add,
+ .remove = smo8800_remove,
+ },
+ .owner = THIS_MODULE,
+};
+
+module_acpi_driver(smo8800_driver);
+
+MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO8800/SMO8810)");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sonal Santan, Pali Rohár");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8ba8956b5a48..484a8673b835 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,6 +53,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
+#define HPWMI_BIOS_QUERY 0x9
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_FEATURE_QUERY 0xd
#define HPWMI_WIRELESS2_QUERY 0x1b
@@ -144,6 +145,7 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -304,6 +306,19 @@ static int hp_wmi_bios_2009_later(void)
return (state & 0x10) ? 1 : 0;
}
+static int hp_wmi_enable_hotkeys(void)
+{
+ int ret;
+ int query = 0x6e;
+
+ ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
+ 0);
+
+ if (ret)
+ return -EINVAL;
+ return 0;
+}
+
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -648,6 +663,9 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
+ if (hp_wmi_bios_2009_later() == 4)
+ hp_wmi_enable_hotkeys();
+
status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6dd060a0bb65..b4c495a62eec 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -36,6 +36,8 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/i8042.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
#define IDEAPAD_RFKILL_DEV_NUM (3)
@@ -819,6 +821,19 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
}
}
+/* Blacklist for devices where the ideapad rfkill interface does not work */
+static struct dmi_system_id rfkill_blacklist[] = {
+ /* The Lenovo Yoga 2 11 always reports everything as blocked */
+ {
+ .ident = "Lenovo Yoga 2 11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"),
+ },
+ },
+ {}
+};
+
static int ideapad_acpi_add(struct platform_device *pdev)
{
int ret, i;
@@ -833,7 +848,7 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (read_method_int(adev->handle, "_CFG", &cfg))
return -ENODEV;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -844,7 +859,7 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ret = ideapad_sysfs_init(priv);
if (ret)
- goto sysfs_failed;
+ return ret;
ret = ideapad_debugfs_init(priv);
if (ret)
@@ -854,11 +869,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (ret)
goto input_failed;
- for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
- ideapad_register_rfkill(priv, i);
- else
- priv->rfk[i] = NULL;
+ if (!dmi_check_system(rfkill_blacklist)) {
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
+ ideapad_register_rfkill(priv, i);
}
ideapad_sync_rfk_state(priv);
ideapad_sync_touchpad_state(priv);
@@ -884,8 +898,6 @@ input_failed:
ideapad_debugfs_exit(priv);
debugfs_failed:
ideapad_sysfs_exit(priv);
-sysfs_failed:
- kfree(priv);
return ret;
}
@@ -903,7 +915,6 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
ideapad_debugfs_exit(priv);
ideapad_sysfs_exit(priv);
dev_set_drvdata(&pdev->dev, NULL);
- kfree(priv);
return 0;
}
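
The ideapad conversion above shows why devm_kzalloc() shrinks the diff: the allocation is released automatically when the device unbinds, so both the sysfs_failed error label and the kfree() in remove() disappear. The general shape, as a minimal sketch with illustrative foo_* names:

#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_priv {
	unsigned long cfg;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	/* Freed automatically on unbind: no kfree() in any error path
	 * and none in the remove() callback. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}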
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 93fab8b70ce1..ab7860a21a22 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -481,7 +481,8 @@ static int mid_thermal_probe(struct platform_device *pdev)
int i;
struct platform_info *pinfo;
- pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+ pinfo = devm_kzalloc(&pdev->dev, sizeof(struct platform_info),
+ GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
@@ -489,7 +490,6 @@ static int mid_thermal_probe(struct platform_device *pdev)
ret = mid_initialize_adc(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "ADC init failed");
- kfree(pinfo);
return ret;
}
@@ -520,7 +520,6 @@ err:
thermal_zone_device_unregister(pinfo->tzd[i]);
}
configure_adc(0);
- kfree(pinfo);
return ret;
}
@@ -541,8 +540,6 @@ static int mid_thermal_remove(struct platform_device *pdev)
thermal_zone_device_unregister(pinfo->tzd[i]);
}
- kfree(pinfo);
-
/* Stop the ADC */
return configure_adc(0);
}
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 2805988485f6..40929e4f7ad7 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -91,7 +91,7 @@ static void pmic_program_irqtype(int gpio, int type)
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- if (offset > 8) {
+ if (offset >= 8) {
pr_err("only pin 0-7 support input\n");
return -1;/* we only have 8 GPIO can use as input */
}
@@ -130,7 +130,7 @@ static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
int ret;
/* we only have 8 GPIO pins we can use as input */
- if (offset > 8)
+ if (offset >= 8)
return -EOPNOTSUPP;
ret = intel_scu_ipc_ioread8(GPIO0 + offset, &r);
if (ret < 0)
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
index c9f6e511daa6..073a90a63dbc 100644
--- a/drivers/platform/x86/pvpanic.c
+++ b/drivers/platform/x86/pvpanic.c
@@ -70,6 +70,7 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
+ .priority = 1, /* let this be called before the broken drm_fb_helper */
};
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d1f030053176..5a5966512277 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -27,6 +27,7 @@
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/efi.h>
+#include <linux/suspend.h>
#include <acpi/video.h>
/*
@@ -340,6 +341,8 @@ struct samsung_laptop {
struct samsung_laptop_debug debug;
struct samsung_quirks *quirks;
+ struct notifier_block pm_nb;
+
bool handle_backlight;
bool has_stepping_quirk;
@@ -348,6 +351,8 @@ struct samsung_laptop {
struct samsung_quirks {
bool broken_acpi_video;
+ bool four_kbd_backlight_levels;
+ bool enable_kbd_backlight;
};
static struct samsung_quirks samsung_unknown = {};
@@ -356,6 +361,11 @@ static struct samsung_quirks samsung_broken_acpi_video = {
.broken_acpi_video = true,
};
+static struct samsung_quirks samsung_np740u3e = {
+ .four_kbd_backlight_levels = true,
+ .enable_kbd_backlight = true,
+};
+
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force,
@@ -1051,6 +1061,8 @@ static int __init samsung_leds_init(struct samsung_laptop *samsung)
samsung->kbd_led.brightness_set = kbd_led_set;
samsung->kbd_led.brightness_get = kbd_led_get;
samsung->kbd_led.max_brightness = 8;
+ if (samsung->quirks->four_kbd_backlight_levels)
+ samsung->kbd_led.max_brightness = 4;
ret = led_classdev_register(&samsung->platform_device->dev,
&samsung->kbd_led);
@@ -1414,6 +1426,19 @@ static void samsung_platform_exit(struct samsung_laptop *samsung)
}
}
+static int samsung_pm_notification(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ struct samsung_laptop *samsung;
+
+ samsung = container_of(nb, struct samsung_laptop, pm_nb);
+ if (val == PM_POST_HIBERNATION &&
+ samsung->quirks->enable_kbd_backlight)
+ kbd_backlight_enable(samsung);
+
+ return 0;
+}
+
static int __init samsung_platform_init(struct samsung_laptop *samsung)
{
struct platform_device *pdev;
@@ -1534,6 +1559,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
},
.driver_data = &samsung_broken_acpi_video,
},
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "730U3E/740U3E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
+ },
+ .driver_data = &samsung_np740u3e,
+ },
{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1608,6 +1642,9 @@ static int __init samsung_init(void)
if (ret)
goto error_debugfs;
+ samsung->pm_nb.notifier_call = samsung_pm_notification;
+ register_pm_notifier(&samsung->pm_nb);
+
samsung_platform_device = samsung->platform_device;
return ret;
@@ -1633,6 +1670,7 @@ static void __exit samsung_exit(void)
struct samsung_laptop *samsung;
samsung = platform_get_drvdata(samsung_platform_device);
+ unregister_pm_notifier(&samsung->pm_nb);
samsung_debugfs_exit(samsung);
samsung_leds_exit(samsung);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 15e61c16736e..d82f196e3cfe 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3171,8 +3171,10 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
/* (assignments unknown, please report if found) */
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN,
+
+ /* Extra keys in use since the X240 / T440 / T540 */
+ KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_COMPUTER,
},
};
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 46473ca7566b..76441dcbe5ff 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -56,6 +56,7 @@
#include <linux/workqueue.h>
#include <linux/i8042.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <asm/uaccess.h>
MODULE_AUTHOR("John Belmonte");
@@ -213,6 +214,30 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_END, 0 },
};
+/* alternative keymap */
+static const struct dmi_system_id toshiba_alt_keymap_dmi[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite M840"),
+ },
+ },
+ {}
+};
+
+static const struct key_entry toshiba_acpi_alt_keymap[] = {
+ { KE_KEY, 0x157, { KEY_MUTE } },
+ { KE_KEY, 0x102, { KEY_ZOOMOUT } },
+ { KE_KEY, 0x103, { KEY_ZOOMIN } },
+ { KE_KEY, 0x139, { KEY_ZOOMRESET } },
+ { KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x13c, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x13d, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x158, { KEY_WLAN } },
+ { KE_KEY, 0x13f, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_END, 0 },
+};
+
/* utility
*/
@@ -1440,6 +1465,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
acpi_handle ec_handle;
int error;
u32 hci_result;
+ const struct key_entry *keymap = toshiba_acpi_keymap;
dev->hotkey_dev = input_allocate_device();
if (!dev->hotkey_dev)
@@ -1449,7 +1475,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
dev->hotkey_dev->phys = "toshiba_acpi/input0";
dev->hotkey_dev->id.bustype = BUS_HOST;
- error = sparse_keymap_setup(dev->hotkey_dev, toshiba_acpi_keymap, NULL);
+ if (dmi_check_system(toshiba_alt_keymap_dmi))
+ keymap = toshiba_acpi_alt_keymap;
+ error = sparse_keymap_setup(dev->hotkey_dev, keymap, NULL);
if (error)
goto err_free_dev;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6aea373547f6..ee3de3421f2d 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -74,7 +74,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
- depends on X86 || COMPILE_TEST
+ depends on X86_32 || COMPILE_TEST
depends on HAS_IOMEM && NET
select PTP_1588_CLOCK
help
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index e25d2bc898e5..296b0ec8744d 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -142,7 +142,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
- err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
+ s32 ppb = scaled_ppm_to_ppb(tx->freq);
+ if (ppb > ops->max_adj || ppb < -ops->max_adj)
+ return -ERANGE;
+ err = ops->adjfreq(ops, ppb);
ptp->dialed_frequency = tx->freq;
} else if (tx->modes == 0) {
tx->freq = ptp->dialed_frequency;
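
The ptp_clock hunk above rejects out-of-range requests before they reach the driver. The timex freq field is parts per million with a 16-bit binary fraction, so the kernel converts it as ppb = scaled_ppm * 1000 / 2^16, and the new check compares that against the clock's advertised max_adj. A small userspace sketch mirroring the conversion (the rounding bias follows the kernel helper as best understood):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's scaled_ppm_to_ppb(): ppm with a 16-bit binary
 * fraction, so ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13. */
static int32_t scaled_ppm_to_ppb(long ppm)
{
	int64_t ppb = 1 + ppm;	/* rounding bias as in the kernel helper */

	ppb *= 125;
	ppb >>= 13;
	return (int32_t)ppb;
}

int main(void)
{
	long scaled = 65536000;	/* 1000.0 ppm */

	printf("%ld scaled ppm -> %d ppb\n", scaled, scaled_ppm_to_ppb(scaled));
	/* A clock advertising max_adj = 500000 now gets -ERANGE for this
	 * request instead of passing 1000000 ppb through to adjfreq(). */
	return 0;
}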
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 5b34ff29ea38..4ad7b89a4cb4 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -62,6 +62,15 @@ config PWM_ATMEL_TCB
To compile this driver as a module, choose M here: the module
will be called pwm-atmel-tcb.
+config PWM_BCM_KONA
+ tristate "Kona PWM support"
+ depends on ARCH_BCM_MOBILE
+ help
+ Generic PWM framework driver for the Broadcom Kona PWM block.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bcm-kona.
+
config PWM_BFIN
tristate "Blackfin PWM support"
depends on BFIN_GPTIMERS
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index e57d2c38a794..5c86a19d5d39 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
+obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a80471399c20..4b66bf09ee55 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -661,10 +661,16 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
}
}
+ mutex_unlock(&pwm_lookup_lock);
+
if (chip)
pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, p->period);
+ pwm_set_polarity(pwm, p->polarity);
- mutex_unlock(&pwm_lookup_lock);
return pwm;
}
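
The pwm core hunk above drops pwm_lookup_lock before requesting the PWM and seeds the returned device with the period and polarity from the board's pwm_lookup entry. A sketch of the consumer side this serves; the "backlight" con_id is illustrative, not taken from the patch:

#include <linux/err.h>
#include <linux/pwm.h>

static int foo_start_pwm(struct device *dev)
{
	struct pwm_device *pwm;
	unsigned int period;
	int ret;

	pwm = pwm_get(dev, "backlight");
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* period was seeded from the board's pwm_lookup entry */
	period = pwm_get_period(pwm);
	ret = pwm_config(pwm, period / 2, period);	/* 50% duty */
	if (ret) {
		pwm_put(pwm);
		return ret;
	}
	return pwm_enable(pwm);
}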
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 1d07a6f99375..4c07a8420b37 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -20,10 +20,6 @@
#define AB8500_PWM_OUT_CTRL2_REG 0x61
#define AB8500_PWM_OUT_CTRL7_REG 0x66
-/* backlight driver constants */
-#define ENABLE_PWM 1
-#define DISABLE_PWM 0
-
struct ab8500_pwm_chip {
struct pwm_chip chip;
};
@@ -64,7 +60,7 @@ static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), ENABLE_PWM);
+ 1 << (chip->base - 1), 1 << (chip->base - 1));
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
@@ -77,11 +73,10 @@ static void ab8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), DISABLE_PWM);
+ 1 << (chip->base - 1), 0);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
- return;
}
static const struct pwm_ops ab8500_pwm_ops = {
@@ -101,10 +96,8 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
* device which is required for ab8500 read and write
*/
ab8500 = devm_kzalloc(&pdev->dev, sizeof(*ab8500), GFP_KERNEL);
- if (ab8500 == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (ab8500 == NULL)
return -ENOMEM;
- }
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 0adc952cc4ef..6e700a541ca3 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -357,6 +357,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
+ atmel_pwm->chip.can_sleep = true;
atmel_pwm->config = data->config;
ret = pwmchip_add(&atmel_pwm->chip);
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
new file mode 100644
index 000000000000..02bc048892a9
--- /dev/null
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/*
+ * The Kona PWM has some unusual characteristics. Here are the main points.
+ *
+ * 1) There is no disable bit and the hardware docs advise programming a zero
+ * duty to achieve output equivalent to that of a normal disable operation.
+ *
+ * 2) Changes to prescale, duty, period, and polarity do not take effect until
+ * a subsequent rising edge of the trigger bit.
+ *
+ * 3) If the smooth bit and trigger bit are both low, the output is a constant
+ * high signal. Otherwise, the earlier waveform continues to be output.
+ *
+ * 4) If the smooth bit is set on the rising edge of the trigger bit, output
+ * will transition to the new settings on a period boundary (which could be
+ * seconds away). If the smooth bit is clear, new settings will be applied
+ * as soon as possible (the hardware always has a 400ns delay).
+ *
+ * 5) When the external clock that feeds the PWM is disabled, output is pegged
+ * high or low depending on its state at that exact instant.
+ */
+
+#define PWM_CONTROL_OFFSET (0x00000000)
+#define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan))
+#define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan))
+#define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan))
+#define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan)
+
+#define PRESCALE_OFFSET (0x00000004)
+#define PRESCALE_SHIFT(chan) ((chan) << 2)
+#define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan))
+#define PRESCALE_MIN (0x00000000)
+#define PRESCALE_MAX (0x00000007)
+
+#define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3))
+#define PERIOD_COUNT_MIN (0x00000002)
+#define PERIOD_COUNT_MAX (0x00ffffff)
+
+#define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3))
+#define DUTY_CYCLE_HIGH_MIN (0x00000000)
+#define DUTY_CYCLE_HIGH_MAX (0x00ffffff)
+
+struct kona_pwmc {
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *_chip)
+{
+ return container_of(_chip, struct kona_pwmc, chip);
+}
+
+static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+{
+ unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
+
+ /* Clear trigger bit but set smooth bit to maintain old output */
+ value |= 1 << PWM_CONTROL_SMOOTH_SHIFT(chan);
+ value &= ~(1 << PWM_CONTROL_TRIGGER_SHIFT(chan));
+ writel(value, kp->base + PWM_CONTROL_OFFSET);
+
+ /* Set trigger bit and clear smooth bit to apply new settings */
+ value &= ~(1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
+ value |= 1 << PWM_CONTROL_TRIGGER_SHIFT(chan);
+ writel(value, kp->base + PWM_CONTROL_OFFSET);
+}
+
+static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct kona_pwmc *kp = to_kona_pwmc(chip);
+ u64 val, div, rate;
+ unsigned long prescale = PRESCALE_MIN, pc, dc;
+ unsigned int value, chan = pwm->hwpwm;
+
+ /*
+ * Find period count, duty count and prescale to suit duty_ns and
+ * period_ns. This is done according to formulas described below:
+ *
+ * period_ns = 10^9 * (PRESCALE + 1) * PC / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ *
+ * PC = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
+ * DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
+ */
+
+ rate = clk_get_rate(kp->clk);
+
+ while (1) {
+ div = 1000000000;
+ div *= 1 + prescale;
+ val = rate * period_ns;
+ pc = div64_u64(val, div);
+ val = rate * duty_ns;
+ dc = div64_u64(val, div);
+
+ /* If duty_ns or period_ns are not achievable then return */
+ if (pc < PERIOD_COUNT_MIN || dc < DUTY_CYCLE_HIGH_MIN)
+ return -EINVAL;
+
+ /* If pc and dc are in bounds, the calculation is done */
+ if (pc <= PERIOD_COUNT_MAX && dc <= DUTY_CYCLE_HIGH_MAX)
+ break;
+
+ /* Otherwise, increase prescale and recalculate pc and dc */
+ if (++prescale > PRESCALE_MAX)
+ return -EINVAL;
+ }
+
+ /* If the PWM channel is enabled, write the settings to the HW */
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ value = readl(kp->base + PRESCALE_OFFSET);
+ value &= ~PRESCALE_MASK(chan);
+ value |= prescale << PRESCALE_SHIFT(chan);
+ writel(value, kp->base + PRESCALE_OFFSET);
+
+ writel(pc, kp->base + PERIOD_COUNT_OFFSET(chan));
+
+ writel(dc, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
+
+ kona_pwmc_apply_settings(kp, chan);
+ }
+
+ return 0;
+}
+
+static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct kona_pwmc *kp = to_kona_pwmc(chip);
+ unsigned int chan = pwm->hwpwm;
+ unsigned int value;
+ int ret;
+
+ ret = clk_prepare_enable(kp->clk);
+ if (ret < 0) {
+ dev_err(chip->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ value = readl(kp->base + PWM_CONTROL_OFFSET);
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ value |= 1 << PWM_CONTROL_POLARITY_SHIFT(chan);
+ else
+ value &= ~(1 << PWM_CONTROL_POLARITY_SHIFT(chan));
+
+ writel(value, kp->base + PWM_CONTROL_OFFSET);
+
+ kona_pwmc_apply_settings(kp, chan);
+
+ /* Wait for waveform to settle before gating off the clock */
+ ndelay(400);
+
+ clk_disable_unprepare(kp->clk);
+
+ return 0;
+}
+
+static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct kona_pwmc *kp = to_kona_pwmc(chip);
+ int ret;
+
+ ret = clk_prepare_enable(kp->clk);
+ if (ret < 0) {
+ dev_err(chip->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = kona_pwmc_config(chip, pwm, pwm->duty_cycle, pwm->period);
+ if (ret < 0) {
+ clk_disable_unprepare(kp->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kona_pwmc_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct kona_pwmc *kp = to_kona_pwmc(chip);
+ unsigned int chan = pwm->hwpwm;
+
+ /* Simulate a disable by configuring for zero duty */
+ writel(0, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
+ kona_pwmc_apply_settings(kp, chan);
+
+ /* Wait for waveform to settle before gating off the clock */
+ ndelay(400);
+
+ clk_disable_unprepare(kp->clk);
+}
+
+static const struct pwm_ops kona_pwm_ops = {
+ .config = kona_pwmc_config,
+ .set_polarity = kona_pwmc_set_polarity,
+ .enable = kona_pwmc_enable,
+ .disable = kona_pwmc_disable,
+ .owner = THIS_MODULE,
+};
+
+static int kona_pwmc_probe(struct platform_device *pdev)
+{
+ struct kona_pwmc *kp;
+ struct resource *res;
+ unsigned int chan;
+ unsigned int value = 0;
+ int ret = 0;
+
+ kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
+ if (kp == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, kp);
+
+ kp->chip.dev = &pdev->dev;
+ kp->chip.ops = &kona_pwm_ops;
+ kp->chip.base = -1;
+ kp->chip.npwm = 6;
+ kp->chip.of_xlate = of_pwm_xlate_with_flags;
+ kp->chip.of_pwm_n_cells = 3;
+ kp->chip.can_sleep = true;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ kp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kp->base))
+ return PTR_ERR(kp->base);
+
+ kp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(kp->clk)) {
+ dev_err(&pdev->dev, "failed to get clock: %ld\n",
+ PTR_ERR(kp->clk));
+ return PTR_ERR(kp->clk);
+ }
+
+ ret = clk_prepare_enable(kp->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ /* Set smooth mode, push/pull, and normal polarity for all channels */
+ for (chan = 0; chan < kp->chip.npwm; chan++) {
+ value |= (1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
+ value |= (1 << PWM_CONTROL_TYPE_SHIFT(chan));
+ value |= (1 << PWM_CONTROL_POLARITY_SHIFT(chan));
+ }
+
+ writel(value, kp->base + PWM_CONTROL_OFFSET);
+
+ clk_disable_unprepare(kp->clk);
+
+ ret = pwmchip_add(&kp->chip);
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+
+ return ret;
+}
+
+static int kona_pwmc_remove(struct platform_device *pdev)
+{
+ struct kona_pwmc *kp = platform_get_drvdata(pdev);
+ unsigned int chan;
+
+ for (chan = 0; chan < kp->chip.npwm; chan++)
+ if (test_bit(PWMF_ENABLED, &kp->chip.pwms[chan].flags))
+ clk_disable_unprepare(kp->clk);
+
+ return pwmchip_remove(&kp->chip);
+}
+
+static const struct of_device_id bcm_kona_pwmc_dt[] = {
+ { .compatible = "brcm,kona-pwm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_kona_pwmc_dt);
+
+static struct platform_driver kona_pwmc_driver = {
+ .driver = {
+ .name = "bcm-kona-pwm",
+ .of_match_table = bcm_kona_pwmc_dt,
+ },
+ .probe = kona_pwmc_probe,
+ .remove = kona_pwmc_remove,
+};
+module_platform_driver(kona_pwmc_driver);
+
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
+MODULE_AUTHOR("Tim Kryger <tkryger@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Kona PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 420169e96b5f..a18bc8fea385 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -454,6 +454,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.of_pwm_n_cells = 3;
fpc->chip.base = -1;
fpc->chip.npwm = 8;
+ fpc->chip.can_sleep = true;
ret = pwmchip_add(&fpc->chip);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index cc4773344874..d797c7b84c3f 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -241,10 +241,8 @@ static int imx_pwm_probe(struct platform_device *pdev)
return -ENODEV;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
- if (imx == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (imx == NULL)
return -ENOMEM;
- }
imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(imx->clk_per)) {
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index a40b9c34e9ff..2c39b0e50fa4 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -278,6 +278,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+ lp3943_pwm->chip.can_sleep = true;
platform_set_drvdata(pdev, lp3943_pwm);
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 449e372050a0..44ce6c6103ae 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -6,6 +6,7 @@
* Author: Chew Kean Ho <kean.ho.chew@intel.com>
* Author: Chang Rebecca Swee Fun <rebecca.swee.fun.chang@intel.com>
* Author: Chew Chiau Ee <chiau.ee.chew@intel.com>
+ * Author: Alan Cox <alan@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,6 +20,9 @@
#include <linux/module.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+static int pci_drv, plat_drv; /* So we know which drivers registered */
#define PWM 0x00000000
#define PWM_ENABLE BIT(31)
@@ -34,6 +38,16 @@ struct pwm_lpss_chip {
struct pwm_chip chip;
void __iomem *regs;
struct clk *clk;
+ unsigned long clk_rate;
+};
+
+struct pwm_lpss_boardinfo {
+ unsigned long clk_rate;
+};
+
+/* BayTrail */
+static const struct pwm_lpss_boardinfo byt_info = {
+ 25000000
};
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
@@ -55,7 +69,7 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* The equation is: base_unit = ((freq / c) * 65536) + correction */
base_unit = freq * 65536;
- c = clk_get_rate(lpwm->clk);
+ c = lpwm->clk_rate;
if (!c)
return -EINVAL;
@@ -113,52 +127,48 @@ static const struct pwm_ops pwm_lpss_ops = {
.owner = THIS_MODULE,
};
-static const struct acpi_device_id pwm_lpss_acpi_match[] = {
- { "80860F09", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
-
-static int pwm_lpss_probe(struct platform_device *pdev)
+static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev,
+ struct resource *r,
+ const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
- struct resource *r;
int ret;
- lpwm = devm_kzalloc(&pdev->dev, sizeof(*lpwm), GFP_KERNEL);
+ lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
if (!lpwm)
- return -ENOMEM;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ return ERR_PTR(-ENOMEM);
- lpwm->regs = devm_ioremap_resource(&pdev->dev, r);
+ lpwm->regs = devm_ioremap_resource(dev, r);
if (IS_ERR(lpwm->regs))
- return PTR_ERR(lpwm->regs);
-
- lpwm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lpwm->clk)) {
- dev_err(&pdev->dev, "failed to get PWM clock\n");
- return PTR_ERR(lpwm->clk);
+ return ERR_CAST(lpwm->regs);
+
+ if (info) {
+ lpwm->clk_rate = info->clk_rate;
+ } else {
+ lpwm->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpwm->clk)) {
+ dev_err(dev, "failed to get PWM clock\n");
+ return ERR_CAST(lpwm->clk);
+ }
+ lpwm->clk_rate = clk_get_rate(lpwm->clk);
}
- lpwm->chip.dev = &pdev->dev;
+ lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
lpwm->chip.base = -1;
lpwm->chip.npwm = 1;
ret = pwmchip_add(&lpwm->chip);
if (ret) {
- dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- return ret;
+ dev_err(dev, "failed to add PWM chip: %d\n", ret);
+ return ERR_PTR(ret);
}
- platform_set_drvdata(pdev, lpwm);
- return 0;
+ return lpwm;
}
-static int pwm_lpss_remove(struct platform_device *pdev)
+static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
{
- struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
u32 ctrl;
ctrl = readl(lpwm->regs + PWM);
@@ -167,15 +177,104 @@ static int pwm_lpss_remove(struct platform_device *pdev)
return pwmchip_remove(&lpwm->chip);
}
-static struct platform_driver pwm_lpss_driver = {
+static int pwm_lpss_probe_pci(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const struct pwm_lpss_boardinfo *info;
+ struct pwm_lpss_chip *lpwm;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0)
+ return err;
+
+ info = (struct pwm_lpss_boardinfo *)id->driver_data;
+ lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
+ if (IS_ERR(lpwm))
+ return PTR_ERR(lpwm);
+
+ pci_set_drvdata(pdev, lpwm);
+ return 0;
+}
+
+static void pwm_lpss_remove_pci(struct pci_dev *pdev)
+{
+ struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);
+
+ pwm_lpss_remove(lpwm);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id pwm_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&byt_info},
+ { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&byt_info},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);
+
+static struct pci_driver pwm_lpss_driver_pci = {
+ .name = "pwm-lpss",
+ .id_table = pwm_lpss_pci_ids,
+ .probe = pwm_lpss_probe_pci,
+ .remove = pwm_lpss_remove_pci,
+};
+
+static int pwm_lpss_probe_platform(struct platform_device *pdev)
+{
+ struct pwm_lpss_chip *lpwm;
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ lpwm = pwm_lpss_probe(&pdev->dev, r, NULL);
+ if (IS_ERR(lpwm))
+ return PTR_ERR(lpwm);
+
+ platform_set_drvdata(pdev, lpwm);
+ return 0;
+}
+
+static int pwm_lpss_remove_platform(struct platform_device *pdev)
+{
+ struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
+
+ return pwm_lpss_remove(lpwm);
+}
+
+static const struct acpi_device_id pwm_lpss_acpi_match[] = {
+ { "80860F09", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
+
+static struct platform_driver pwm_lpss_driver_platform = {
.driver = {
.name = "pwm-lpss",
.acpi_match_table = pwm_lpss_acpi_match,
},
- .probe = pwm_lpss_probe,
- .remove = pwm_lpss_remove,
+ .probe = pwm_lpss_probe_platform,
+ .remove = pwm_lpss_remove_platform,
};
-module_platform_driver(pwm_lpss_driver);
+
+static int __init pwm_init(void)
+{
+ pci_drv = pci_register_driver(&pwm_lpss_driver_pci);
+ plat_drv = platform_driver_register(&pwm_lpss_driver_platform);
+ if (pci_drv && plat_drv)
+ return pci_drv;
+
+ return 0;
+}
+module_init(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ if (!pci_drv)
+ pci_unregister_driver(&pwm_lpss_driver_pci);
+ if (!plat_drv)
+ platform_driver_unregister(&pwm_lpss_driver_platform);
+}
+module_exit(pwm_exit);
MODULE_DESCRIPTION("PWM driver for Intel LPSS");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 9475bc7a6f97..4f1bb4e0a426 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -147,6 +147,7 @@ static int mxs_pwm_probe(struct platform_device *pdev)
mxs->chip.dev = &pdev->dev;
mxs->chip.ops = &mxs_pwm_ops;
mxs->chip.base = -1;
+ mxs->chip.can_sleep = true;
ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret);
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index cd356d870244..0b312ec420b6 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -179,10 +179,8 @@ static int pwm_probe(struct platform_device *pdev)
return -EINVAL;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (pwm == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (pwm == NULL)
return -ENOMEM;
- }
pwm->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pwm->clk))
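The dropped dev_err() after devm_kzalloc() is deliberate: the page allocator already prints a warning with a backtrace when an allocation fails (unless __GFP_NOWARN is set), so a per-driver message adds nothing. The same cleanup recurs in the pwm-renesas-tpu, pwm-spear, pwm-tegra, pwm-tiecap, pwm-tiehrpwm and pwm-vt8500 hunks below; the idiomatic form is simply:

    pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
    if (!pwm)
            return -ENOMEM;    /* the allocator has already logged this */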
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index aff6ba9b49e7..3b71b42e89d5 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -21,13 +21,14 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/platform_data/pwm-renesas-tpu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#define TPU_CHANNEL_MAX 4
+
#define TPU_TSTR 0x00 /* Timer start register (shared) */
#define TPU_TCRn 0x00 /* Timer control register */
@@ -87,7 +88,6 @@ struct tpu_pwm_device {
struct tpu_device {
struct platform_device *pdev;
- enum pwm_polarity polarities[TPU_CHANNEL_MAX];
struct pwm_chip chip;
spinlock_t lock;
@@ -229,7 +229,7 @@ static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *_pwm)
pwm->tpu = tpu;
pwm->channel = _pwm->hwpwm;
- pwm->polarity = tpu->polarities[pwm->channel];
+ pwm->polarity = PWM_POLARITY_NORMAL;
pwm->prescaler = 0;
pwm->period = 0;
pwm->duty = 0;
@@ -388,16 +388,6 @@ static const struct pwm_ops tpu_pwm_ops = {
* Probe and remove
*/
-static void tpu_parse_pdata(struct tpu_device *tpu)
-{
- struct tpu_pwm_platform_data *pdata = tpu->pdev->dev.platform_data;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(tpu->polarities); ++i)
- tpu->polarities[i] = pdata ? pdata->channels[i].polarity
- : PWM_POLARITY_NORMAL;
-}
-
static int tpu_probe(struct platform_device *pdev)
{
struct tpu_device *tpu;
@@ -405,17 +395,12 @@ static int tpu_probe(struct platform_device *pdev)
int ret;
tpu = devm_kzalloc(&pdev->dev, sizeof(*tpu), GFP_KERNEL);
- if (tpu == NULL) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ if (tpu == NULL)
return -ENOMEM;
- }
spin_lock_init(&tpu->lock);
tpu->pdev = pdev;
- /* Initialize device configuration from platform data. */
- tpu_parse_pdata(tpu);
-
/* Map memory, get clock and pin control. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tpu->base = devm_ioremap_resource(&pdev->dev, res);
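With the platform-data header gone, per-channel polarity can no longer come from board files; requests start out as PWM_POLARITY_NORMAL and device-tree consumers select inversion through the PWM specifier instead. A sketch of how a chip can wire that up with the generic translator, assuming its #pwm-cells includes a flags cell (this is not code taken from the TPU driver):

    /* Let DT consumers pass PWM_POLARITY_INVERTED in the flags cell. */
    tpu->chip.of_xlate = of_pwm_xlate_with_flags;
    tpu->chip.of_pwm_n_cells = 3;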
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index d66529a995a1..ba6b650cf8dc 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -335,9 +335,6 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
writel(tcnt, our_chip->base + REG_TCNTB(pwm->hwpwm));
writel(tcmp, our_chip->base + REG_TCMPB(pwm->hwpwm));
- if (test_bit(PWMF_ENABLED, &pwm->flags))
- pwm_samsung_enable(chip, pwm);
-
chan->period_ns = period_ns;
chan->tin_ns = tin_ns;
chan->duty_ns = duty_ns;
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index cb2d4f0f9711..6fd93e6a4122 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -179,10 +179,8 @@ static int spear_pwm_probe(struct platform_device *pdev)
u32 val;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!pc)
return -ENOMEM;
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
@@ -222,7 +220,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
}
ret = pwmchip_add(&pc->chip);
- if (!ret) {
+ if (ret < 0) {
clk_unprepare(pc->clk);
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
}
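The spear hunk fixes an inverted test: pwmchip_add() returns 0 on success and a negative errno on failure, so the old `if (!ret)` unprepared the clock and logged an error precisely when registration had succeeded. The convention to code against:

    ret = pwmchip_add(&pc->chip);
    if (ret < 0) {          /* negative errno means the add failed */
            clk_unprepare(pc->clk);
            dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
    }
    return ret;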
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 74298c561c4e..61d86b9498ca 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -173,10 +173,8 @@ static int tegra_pwm_probe(struct platform_device *pdev)
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (!pwm) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!pwm)
return -ENOMEM;
- }
pwm->dev = &pdev->dev;
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 032092c7a6ae..74efbe7f20c3 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -209,10 +209,8 @@ static int ecap_pwm_probe(struct platform_device *pdev)
u16 status;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!pc)
return -ENOMEM;
- }
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index aee4471424d1..cb75133085a8 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -138,12 +138,12 @@ static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct ehrpwm_pwm_chip, chip);
}
-static u16 ehrpwm_read(void __iomem *base, int offset)
+static inline u16 ehrpwm_read(void __iomem *base, int offset)
{
return readw(base + offset);
}
-static void ehrpwm_write(void __iomem *base, int offset, unsigned int val)
+static inline void ehrpwm_write(void __iomem *base, int offset, unsigned int val)
{
writew(val & 0xFFFF, base + offset);
}
@@ -440,10 +440,8 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
u16 status;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!pc)
return -ENOMEM;
- }
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
@@ -531,6 +529,7 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+#ifdef CONFIG_PM_SLEEP
static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
{
pm_runtime_get_sync(pc->chip.dev);
@@ -557,7 +556,6 @@ static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
ehrpwm_write(pc->mmio_base, TBCTL, pc->ctx.tbctl);
}
-#ifdef CONFIG_PM_SLEEP
static int ehrpwm_pwm_suspend(struct device *dev)
{
struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
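Moving the context save/restore helpers under #ifdef CONFIG_PM_SLEEP silences "defined but not used" warnings when sleep support is configured out: their only callers are the suspend/resume callbacks, which SIMPLE_DEV_PM_OPS already compiles away in that configuration. The usual shape, with foo_* as placeholder names:

    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct device *dev)
    {
            /* save context, gate clocks, ... */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* restore context */
            return 0;
    }
    #endif

    /* Expands to an empty ops table when CONFIG_PM_SLEEP is off. */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);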
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index b99a50e626a6..04f76725d591 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -265,14 +265,6 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read TOGGLE3\n", pwm->label);
- goto out;
- }
-
- val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
- if (ret < 0) {
dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 323125abf3f4..652e6b5b859b 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -211,10 +211,8 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
}
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
- if (chip == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (chip == NULL)
return -ENOMEM;
- }
chip->chip.dev = &pdev->dev;
chip->chip.ops = &vt8500_pwm_ops;
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 85585219ce82..ad9e0c9b7daf 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -433,6 +433,7 @@ static struct regulator_ops as3722_ldo3_extcntrl_ops = {
};
static const struct regulator_linear_range as3722_ldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
};
@@ -609,6 +610,7 @@ static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
}
static const struct regulator_linear_range as3722_sd2345_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
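The degenerate first entry gives selector 0 a defined meaning (0 µV, i.e. off) instead of making the helpers reject it. REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) describes voltage(sel) = min_uV + (sel - min_sel) * step_uV for selectors inside the range, and the linear-range list/map helpers walk the array until a range contains the selector:

    static const struct regulator_linear_range foo_ldo_ranges[] = {
            REGULATOR_LINEAR_RANGE(0,      0x00, 0x00, 0),     /* sel 0 -> 0 uV */
            REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000), /* 25 mV steps   */
    };

The palmas SMPS ranges below gain the same zero-selector entry.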
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 57544e254a78..58ece59367ae 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -119,6 +119,10 @@ static const unsigned int ldo_c_table[] = {
2900000, 3000000, 3300000,
};
+static const unsigned int ldo_vbus[] = {
+ 5000000,
+};
+
/* DCDC group CSR: supported voltages in microvolts */
static const struct regulator_linear_range dcdc_csr_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
@@ -192,6 +196,7 @@ static struct bcm590xx_info bcm590xx_regs[] = {
BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
+ BCM590XX_REG_TABLE(vbus, ldo_vbus),
};
struct bcm590xx_reg {
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 110a99ee1162..c8105182b8b8 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -255,7 +255,7 @@ static int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
struct device_node *node;
int i, ret;
- node = of_find_node_by_name(dev->of_node, "regulators");
+ node = of_get_child_by_name(dev->of_node, "regulators");
if (!node) {
dev_err(dev, "regulators node not found\n");
return -EINVAL;
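of_find_node_by_name() does not stop at the caller's children: it walks the whole flattened tree starting after the given node, so it can return a "regulators" node belonging to some other device. of_get_child_by_name() restricts the search to direct children and returns a counted reference:

    struct device_node *np;

    np = of_get_child_by_name(dev->of_node, "regulators");
    if (!np)
            return -EINVAL;
    /* ... iterate the child regulator nodes ... */
    of_node_put(np);    /* drop the reference taken by the lookup */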
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 864ed02ce4b7..93b4ad842901 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -37,12 +37,14 @@ struct regs_info {
};
static const struct regulator_linear_range smps_low_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(500000, 0x1, 0x6, 0),
REGULATOR_LINEAR_RANGE(510000, 0x7, 0x79, 10000),
REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0),
};
static const struct regulator_linear_range smps_high_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x6, 0),
REGULATOR_LINEAR_RANGE(1020000, 0x7, 0x79, 20000),
REGULATOR_LINEAR_RANGE(3300000, 0x7A, 0x7f, 0),
@@ -323,6 +325,10 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
if (rail_enable)
palmas_smps_write(pmic->palmas,
palmas_regs_info[id].ctrl_addr, reg);
+
+ /* Switch the enable value to ensure this is used for enable */
+ pmic->desc[id].enable_val = pmic->current_reg_mode[id];
+
return 0;
}
@@ -962,6 +968,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
return ret;
pmic->current_reg_mode[id] = reg &
PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+
+ pmic->desc[id].enable_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ palmas_regs_info[id].ctrl_addr);
+ pmic->desc[id].enable_mask =
+ PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+ /* set_mode overrides this value */
+ pmic->desc[id].enable_val = SMPS_CTRL_MODE_ON;
}
pmic->desc[id].type = REGULATOR_VOLTAGE;
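Filling in enable_reg/enable_mask/enable_val lets these rails use the generic regmap enable helpers, and having set_mode keep enable_val in sync means a later regulator_enable() restores the mode the user last chose rather than forcing MODE_ON. Roughly, the helpers test and program (register value & enable_mask) == enable_val, so updating enable_val after a mode change is what makes re-enable mode-preserving. A sketch, where CTRL_REG and MODE_ACTIVE_MASK stand in for the real palmas names:

    desc->enable_reg  = CTRL_REG;           /* register the helpers poke */
    desc->enable_mask = MODE_ACTIVE_MASK;   /* bits that encode the mode */
    desc->enable_val  = current_mode;       /* value that means "on"     */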
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 69b4b7750410..9effe48c605e 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -209,7 +209,7 @@ static const struct regulator_desc regulators[] = {
1, -1, -1, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0),
TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
- TPS65218_REG_CONTROL_DCDC4,
+ TPS65218_REG_CONTROL_LDO1,
TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
2, 0),
@@ -240,6 +240,7 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
config.init_data = init_data;
config.driver_data = tps;
config.regmap = tps->regmap;
+ config.of_node = pdev->dev.of_node;
rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index f53e78b9a84e..6ff95b045984 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -266,11 +266,11 @@ static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(min_microvolts, 0666, show_min_uV, set_min_uV);
-static DEVICE_ATTR(max_microvolts, 0666, show_max_uV, set_max_uV);
-static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA);
-static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA);
-static DEVICE_ATTR(mode, 0666, show_mode, set_mode);
+static DEVICE_ATTR(min_microvolts, 0664, show_min_uV, set_min_uV);
+static DEVICE_ATTR(max_microvolts, 0664, show_max_uV, set_max_uV);
+static DEVICE_ATTR(min_microamps, 0664, show_min_uA, set_min_uA);
+static DEVICE_ATTR(max_microamps, 0664, show_max_uA, set_max_uA);
+static DEVICE_ATTR(mode, 0664, show_mode, set_mode);
static struct attribute *regulator_virtual_attributes[] = {
&dev_attr_min_microvolts.attr,
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index ce1743d0b679..5e343bab9458 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -44,7 +44,7 @@ config STE_MODEM_RPROC
config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
- select CMA
+ select CMA if MMU
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 71988b69eca6..0754f5c7cb3b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -530,11 +530,11 @@ config RTC_DRV_RV3029C2
will be called rtc-rv3029c2.
config RTC_DRV_S5M
- tristate "Samsung S5M series"
+ tristate "Samsung S2M/S5M series"
depends on MFD_SEC_CORE
help
If you say yes here you will get support for the
- RTC of Samsung S5M PMIC series.
+ RTC of Samsung S2MPS14 and S5M PMIC series.
This driver can also be built as a module. If so, the module
will be called rtc-s5m.
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 1ecfe3bd92ac..1cff2a21db67 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
{
unsigned int tmp;
- dev_debug(dev, "%s: pie=%d\n", __func__, enabled);
+ dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
spin_lock_irq(&puv3_rtc_pie_lock);
tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
@@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
rtc_tm_to_time(tm, &rtcalarm_count);
writel(rtcalarm_count, RTC_RTAR);
- puv3_rtc_setaie(&dev->dev, alrm->enabled);
+ puv3_rtc_setaie(dev, alrm->enabled);
if (alrm->enabled)
enable_irq_wake(puv3_rtc_alarmno);
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 8ec2d6a1dbe1..8f06250a0389 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ * Copyright (c) 2013-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
* Copyright (C) 2013 Google, Inc
@@ -17,27 +17,76 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/bcd.h>
-#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
-#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mps14.h>
/*
* Maximum number of retries for checking changes in UDR field
- * of SEC_RTC_UDR_CON register (to limit possible endless loop).
+ * of S5M_RTC_UDR_CON register (to limit possible endless loop).
*
* After writing to RTC registers (setting time or alarm) read the UDR field
- * in SEC_RTC_UDR_CON register. UDR is auto-cleared when data have
+ * in S5M_RTC_UDR_CON register. UDR is auto-cleared when data have
* been transferred.
*/
#define UDR_READ_RETRY_CNT 5
+/* Registers used by the driver which are different between chipsets. */
+struct s5m_rtc_reg_config {
+ /* Number of registers used for setting time/alarm0/alarm1 */
+ unsigned int regs_count;
+ /* First register for time, seconds */
+ unsigned int time;
+ /* RTC control register */
+ unsigned int ctrl;
+ /* First register for alarm 0, seconds */
+ unsigned int alarm0;
+ /* First register for alarm 1, seconds */
+ unsigned int alarm1;
+ /* SMPL/WTSR register */
+ unsigned int smpl_wtsr;
+ /*
+ * Register for update flag (UDR). Typically setting UDR field to 1
+ * will enable update of time or alarm register. Then it will be
+ * auto-cleared after successful update.
+ */
+ unsigned int rtc_udr_update;
+ /* Mask for UDR field in 'rtc_udr_update' register */
+ unsigned int rtc_udr_mask;
+};
+
+/* Register map for S5M8763 and S5M8767 */
+static const struct s5m_rtc_reg_config s5m_rtc_regs = {
+ .regs_count = 8,
+ .time = S5M_RTC_SEC,
+ .ctrl = S5M_ALARM1_CONF,
+ .alarm0 = S5M_ALARM0_SEC,
+ .alarm1 = S5M_ALARM1_SEC,
+ .smpl_wtsr = S5M_WTSR_SMPL_CNTL,
+ .rtc_udr_update = S5M_RTC_UDR_CON,
+ .rtc_udr_mask = S5M_RTC_UDR_MASK,
+};
+
+/*
+ * Register map for S2MPS14.
+ * It may also be suitable for S2MPS11, but this was not tested.
+ */
+static const struct s5m_rtc_reg_config s2mps_rtc_regs = {
+ .regs_count = 7,
+ .time = S2MPS_RTC_SEC,
+ .ctrl = S2MPS_RTC_CTRL,
+ .alarm0 = S2MPS_ALARM0_SEC,
+ .alarm1 = S2MPS_ALARM1_SEC,
+ .smpl_wtsr = S2MPS_WTSR_SMPL_CNTL,
+ .rtc_udr_update = S2MPS_RTC_UDR_CON,
+ .rtc_udr_mask = S2MPS_RTC_WUDR_MASK,
+};
+
struct s5m_rtc_info {
struct device *dev;
struct i2c_client *i2c;
@@ -48,13 +97,14 @@ struct s5m_rtc_info {
int device_type;
int rtc_24hr_mode;
bool wtsr_smpl;
+ const struct s5m_rtc_reg_config *regs;
};
static const struct regmap_config s5m_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = SEC_RTC_REG_MAX,
+ .max_register = S5M_RTC_REG_MAX,
};
static const struct regmap_config s2mps14_rtc_regmap_config = {
@@ -119,8 +169,9 @@ static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
unsigned int data;
do {
- ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
- } while (--retry && (data & RTC_UDR_MASK) && !ret);
+ ret = regmap_read(info->regmap, info->regs->rtc_udr_update,
+ &data);
+ } while (--retry && (data & info->regs->rtc_udr_mask) && !ret);
if (!retry)
dev_err(info->dev, "waiting for UDR update, reached max number of retries\n");
@@ -128,21 +179,53 @@ static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
return ret;
}
+static inline int s5m_check_pending_alarm_interrupt(struct s5m_rtc_info *info,
+ struct rtc_wkalrm *alarm)
+{
+ int ret;
+ unsigned int val;
+
+ switch (info->device_type) {
+ case S5M8767X:
+ case S5M8763X:
+ ret = regmap_read(info->regmap, S5M_RTC_STATUS, &val);
+ val &= S5M_ALARM0_STATUS;
+ break;
+ case S2MPS14X:
+ ret = regmap_read(info->s5m87xx->regmap_pmic, S2MPS14_REG_ST2,
+ &val);
+ val &= S2MPS_ALARM0_STATUS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ alarm->pending = 1;
+ else
+ alarm->pending = 0;
+
+ return 0;
+}
+
static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
- ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
+ ret = regmap_read(info->regmap, info->regs->rtc_udr_update, &data);
if (ret < 0) {
dev_err(info->dev, "failed to read update reg(%d)\n", ret);
return ret;
}
- data |= RTC_TIME_EN_MASK;
- data |= RTC_UDR_MASK;
+ data |= info->regs->rtc_udr_mask;
+ if (info->device_type == S5M8763X || info->device_type == S5M8767X)
+ data |= S5M_RTC_TIME_EN_MASK;
- ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
+ ret = regmap_write(info->regmap, info->regs->rtc_udr_update, data);
if (ret < 0) {
dev_err(info->dev, "failed to write update reg(%d)\n", ret);
return ret;
@@ -158,17 +241,27 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
int ret;
unsigned int data;
- ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
+ ret = regmap_read(info->regmap, info->regs->rtc_udr_update, &data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to read update reg(%d)\n",
__func__, ret);
return ret;
}
- data &= ~RTC_TIME_EN_MASK;
- data |= RTC_UDR_MASK;
+ data |= info->regs->rtc_udr_mask;
+ switch (info->device_type) {
+ case S5M8763X:
+ case S5M8767X:
+ data &= ~S5M_RTC_TIME_EN_MASK;
+ break;
+ case S2MPS14X:
+ data |= S2MPS_RTC_RUDR_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
- ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
+ ret = regmap_write(info->regmap, info->regs->rtc_udr_update, data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write update reg(%d)\n",
__func__, ret);
@@ -215,10 +308,22 @@ static void s5m8763_tm_to_data(struct rtc_time *tm, u8 *data)
static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
- u8 data[8];
+ u8 data[info->regs->regs_count];
int ret;
- ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8);
+ if (info->device_type == S2MPS14X) {
+ ret = regmap_update_bits(info->regmap,
+ info->regs->rtc_udr_update,
+ S2MPS_RTC_RUDR_MASK, S2MPS_RTC_RUDR_MASK);
+ if (ret) {
+ dev_err(dev,
+ "Failed to prepare registers for time reading: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ ret = regmap_bulk_read(info->regmap, info->regs->time, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
@@ -228,6 +333,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
break;
case S5M8767X:
+ case S2MPS14X:
s5m8767_data_to_tm(data, tm, info->rtc_24hr_mode);
break;
@@ -245,7 +351,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
- u8 data[8];
+ u8 data[info->regs->regs_count];
int ret = 0;
switch (info->device_type) {
@@ -253,6 +359,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
s5m8763_tm_to_data(tm, data);
break;
case S5M8767X:
+ case S2MPS14X:
ret = s5m8767_tm_to_data(tm, data);
break;
default:
@@ -266,7 +373,8 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
- ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, info->regs->time, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
@@ -278,70 +386,60 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
- u8 data[8];
+ u8 data[info->regs->regs_count];
unsigned int val;
int ret, i;
- ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, info->regs->alarm0, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
switch (info->device_type) {
case S5M8763X:
s5m8763_data_to_tm(data, &alrm->time);
- ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val);
+ ret = regmap_read(info->regmap, S5M_ALARM0_CONF, &val);
if (ret < 0)
return ret;
alrm->enabled = !!val;
-
- ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
- if (ret < 0)
- return ret;
-
break;
case S5M8767X:
+ case S2MPS14X:
s5m8767_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
- dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
- 1900 + alrm->time.tm_year, 1 + alrm->time.tm_mon,
- alrm->time.tm_mday, alrm->time.tm_hour,
- alrm->time.tm_min, alrm->time.tm_sec,
- alrm->time.tm_wday);
-
alrm->enabled = 0;
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < info->regs->regs_count; i++) {
if (data[i] & ALARM_ENABLE_MASK) {
alrm->enabled = 1;
break;
}
}
-
- alrm->pending = 0;
- ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
- if (ret < 0)
- return ret;
break;
default:
return -EINVAL;
}
- if (val & ALARM0_STATUS)
- alrm->pending = 1;
- else
- alrm->pending = 0;
+ dev_dbg(dev, "%s: %d/%d/%d %d:%d:%d(%d)\n", __func__,
+ 1900 + alrm->time.tm_year, 1 + alrm->time.tm_mon,
+ alrm->time.tm_mday, alrm->time.tm_hour,
+ alrm->time.tm_min, alrm->time.tm_sec,
+ alrm->time.tm_wday);
+
+ ret = s5m_check_pending_alarm_interrupt(info, alarm);
return 0;
}
static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
{
- u8 data[8];
+ u8 data[info->regs->regs_count];
int ret, i;
struct rtc_time tm;
- ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, info->regs->alarm0, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
@@ -352,14 +450,16 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8763X:
- ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0);
+ ret = regmap_write(info->regmap, S5M_ALARM0_CONF, 0);
break;
case S5M8767X:
- for (i = 0; i < 7; i++)
+ case S2MPS14X:
+ for (i = 0; i < info->regs->regs_count; i++)
data[i] &= ~ALARM_ENABLE_MASK;
- ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, info->regs->alarm0, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
@@ -377,11 +477,12 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
{
int ret;
- u8 data[8];
+ u8 data[info->regs->regs_count];
u8 alarm0_conf;
struct rtc_time tm;
- ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, info->regs->alarm0, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
@@ -393,10 +494,11 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8763X:
alarm0_conf = 0x77;
- ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf);
+ ret = regmap_write(info->regmap, S5M_ALARM0_CONF, alarm0_conf);
break;
case S5M8767X:
+ case S2MPS14X:
data[RTC_SEC] |= ALARM_ENABLE_MASK;
data[RTC_MIN] |= ALARM_ENABLE_MASK;
data[RTC_HOUR] |= ALARM_ENABLE_MASK;
@@ -408,7 +510,8 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
if (data[RTC_YEAR1] & 0x7f)
data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
- ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, info->regs->alarm0, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_alarm_reg(info);
@@ -425,7 +528,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
- u8 data[8];
+ u8 data[info->regs->regs_count];
int ret;
switch (info->device_type) {
@@ -434,6 +537,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
break;
case S5M8767X:
+ case S2MPS14X:
s5m8767_tm_to_data(&alrm->time, data);
break;
@@ -450,7 +554,8 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret < 0)
return ret;
- ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, info->regs->alarm0, data,
+ info->regs->regs_count);
if (ret < 0)
return ret;
@@ -495,7 +600,7 @@ static const struct rtc_class_ops s5m_rtc_ops = {
static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
{
int ret;
- ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
+ ret = regmap_update_bits(info->regmap, info->regs->smpl_wtsr,
WTSR_ENABLE_MASK,
enable ? WTSR_ENABLE_MASK : 0);
if (ret < 0)
@@ -506,7 +611,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
{
int ret;
- ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
+ ret = regmap_update_bits(info->regmap, info->regs->smpl_wtsr,
SMPL_ENABLE_MASK,
enable ? SMPL_ENABLE_MASK : 0);
if (ret < 0)
@@ -517,50 +622,41 @@ static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
{
u8 data[2];
- unsigned int tp_read;
int ret;
- struct rtc_time tm;
- ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to read control reg(%d)\n",
- __func__, ret);
- return ret;
- }
+ switch (info->device_type) {
+ case S5M8763X:
+ case S5M8767X:
+ /* UDR update time. Default of 7.32 ms is too long. */
+ ret = regmap_update_bits(info->regmap, S5M_RTC_UDR_CON,
+ S5M_RTC_UDR_T_MASK, S5M_RTC_UDR_T_450_US);
+ if (ret < 0)
+ dev_err(info->dev, "%s: fail to change UDR time: %d\n",
+ __func__, ret);
- /* Set RTC control register : Binary mode, 24hour mode */
- data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
- data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ /* Set RTC control register : Binary mode, 24hour mode */
+ data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+
+ ret = regmap_raw_write(info->regmap, S5M_ALARM0_CONF, data, 2);
+ break;
+
+ case S2MPS14X:
+ data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
+ break;
+
+ default:
+ return -EINVAL;
+ }
info->rtc_24hr_mode = 1;
- ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
__func__, ret);
return ret;
}
- /* In first boot time, Set rtc time to 1/1/2012 00:00:00(SUN) */
- if ((tp_read & RTC_TCON_MASK) == 0) {
- dev_dbg(info->dev, "rtc init\n");
- tm.tm_sec = 0;
- tm.tm_min = 0;
- tm.tm_hour = 0;
- tm.tm_wday = 0;
- tm.tm_mday = 1;
- tm.tm_mon = 0;
- tm.tm_year = 112;
- tm.tm_yday = 0;
- tm.tm_isdst = 0;
- ret = s5m_rtc_set_time(info->dev, &tm);
- }
-
- ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON,
- RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
- if (ret < 0)
- dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
- __func__, ret);
-
return ret;
}
@@ -570,7 +666,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
struct sec_platform_data *pdata = s5m87xx->pdata;
struct s5m_rtc_info *info;
const struct regmap_config *regmap_cfg;
- int ret;
+ int ret, alarm_irq;
if (!pdata) {
dev_err(pdev->dev.parent, "Platform data not supplied\n");
@@ -584,12 +680,18 @@ static int s5m_rtc_probe(struct platform_device *pdev)
switch (pdata->device_type) {
case S2MPS14X:
regmap_cfg = &s2mps14_rtc_regmap_config;
+ info->regs = &s2mps_rtc_regs;
+ alarm_irq = S2MPS14_IRQ_RTCA0;
break;
case S5M8763X:
regmap_cfg = &s5m_rtc_regmap_config;
+ info->regs = &s5m_rtc_regs;
+ alarm_irq = S5M8763_IRQ_ALARM0;
break;
case S5M8767X:
regmap_cfg = &s5m_rtc_regmap_config;
+ info->regs = &s5m_rtc_regs;
+ alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
dev_err(&pdev->dev, "Device type is not supported by RTC driver\n");
@@ -615,20 +717,11 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->device_type = s5m87xx->device_type;
info->wtsr_smpl = s5m87xx->wtsr_smpl;
- switch (pdata->device_type) {
- case S5M8763X:
- info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
- S5M8763_IRQ_ALARM0);
- break;
-
- case S5M8767X:
- info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
- S5M8767_IRQ_RTCA1);
- break;
-
- default:
+ info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
+ if (info->irq <= 0) {
ret = -EINVAL;
- dev_err(&pdev->dev, "Unsupported device type: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
+ alarm_irq);
goto err;
}
@@ -676,7 +769,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
if (info->wtsr_smpl) {
for (i = 0; i < 3; i++) {
s5m_rtc_enable_wtsr(info, false);
- regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val);
+ regmap_read(info->regmap, info->regs->smpl_wtsr, &val);
pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
if (val & WTSR_ENABLE_MASK)
pr_emerg("%s: fail to disable WTSR\n",
@@ -730,7 +823,8 @@ static int s5m_rtc_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
- { "s5m-rtc", 0 },
+ { "s5m-rtc", S5M8767X },
+ { "s2mps14-rtc", S2MPS14X },
};
static struct platform_driver s5m_rtc_driver = {
@@ -749,6 +843,6 @@ module_platform_driver(s5m_rtc_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("Samsung S5M RTC driver");
+MODULE_DESCRIPTION("Samsung S5M/S2MPS14 RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s5m-rtc");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ee0e85abe1fd..0f471750327e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -593,7 +593,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->start = dcssblk_find_lowest_addr(dev_info);
dev_info->end = dcssblk_find_highest_addr(dev_info);
- dev_set_name(&dev_info->dev, dev_info->segment_name);
+ dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
dev_info->dev.release = dcssblk_release_segment;
dev_info->dev.groups = dcssblk_dev_attr_groups;
INIT_LIST_HEAD(&dev_info->lh);
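Passing a non-literal string as the format argument is a classic format-string bug: a segment name containing '%' would be parsed as conversion specifiers and walk off a nonexistent argument list. Pinning the format confines the string to data:

    dev_set_name(&dev_info->dev, dev_info->segment_name);        /* unsafe */
    dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);  /* safe   */

The vmlogrdr and zcrypt hunks below apply the same fix to dev_set_name() and request_module().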
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 629fcc275e92..78b6ace7edcb 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
-obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index cd9c91909596..b9a9f721716d 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -838,8 +838,6 @@ sclp_vt220_con_init(void)
{
int rc;
- if (!CONSOLE_IS_SCLP)
- return 0;
rc = __sclp_vt220_init(sclp_console_pages);
if (rc)
return rc;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index cf31d3321dab..a8848db7b09d 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -761,7 +761,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (dev) {
- dev_set_name(dev, priv->internal_name);
+ dev_set_name(dev, "%s", priv->internal_name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
deleted file mode 100644
index d5eac985976b..000000000000
--- a/drivers/s390/char/vmwatchdog.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Watchdog implementation based on z/VM Watchdog Timer API
- *
- * Copyright IBM Corp. 2004, 2009
- *
- * The user space watchdog daemon can use this driver as
- * /dev/vmwatchdog to have z/VM execute the specified CP
- * command when the timeout expires. The default command is
- * "IPL", which which cause an immediate reboot.
- */
-#define KMSG_COMPONENT "vmwatchdog"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/watchdog.h>
-
-#include <asm/ebcdic.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#define MAX_CMDLEN 240
-#define MIN_INTERVAL 15
-static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
-static bool vmwdt_conceal;
-
-static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_DESCRIPTION("z/VM Watchdog Timer");
-module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
-MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
-module_param_named(conceal, vmwdt_conceal, bool, 0644);
-MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
- " is active");
-module_param_named(nowayout, vmwdt_nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
- " (default=CONFIG_WATCHDOG_NOWAYOUT)");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
-static unsigned int vmwdt_interval = 60;
-static unsigned long vmwdt_is_open;
-static int vmwdt_expect_close;
-
-static DEFINE_MUTEX(vmwdt_mutex);
-
-#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
-#define VMWDT_RUNNING 1 /* The watchdog is armed */
-
-enum vmwdt_func {
- /* function codes */
- wdt_init = 0,
- wdt_change = 1,
- wdt_cancel = 2,
- /* flags */
- wdt_conceal = 0x80000000,
-};
-
-static int __diag288(enum vmwdt_func func, unsigned int timeout,
- char *cmd, size_t len)
-{
- register unsigned long __func asm("2") = func;
- register unsigned long __timeout asm("3") = timeout;
- register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
- register unsigned long __cmdl asm("5") = len;
- int err;
-
- err = -EINVAL;
- asm volatile(
- " diag %1,%3,0x288\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (err) : "d"(__func), "d"(__timeout),
- "d"(__cmdp), "d"(__cmdl) : "1", "cc");
- return err;
-}
-
-static int vmwdt_keepalive(void)
-{
- /* we allocate new memory every time to avoid having
- * to track the state. static allocation is not an
- * option since that might not be contiguous in real
- * storage in case of a modular build */
- static char *ebc_cmd;
- size_t len;
- int ret;
- unsigned int func;
-
- ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
- if (!ebc_cmd)
- return -ENOMEM;
-
- len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
- ASCEBC(ebc_cmd, MAX_CMDLEN);
- EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
-
- func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
- set_bit(VMWDT_RUNNING, &vmwdt_is_open);
- ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
- WARN_ON(ret != 0);
- kfree(ebc_cmd);
- return ret;
-}
-
-static int vmwdt_disable(void)
-{
- char cmd[] = {'\0'};
- int ret = __diag288(wdt_cancel, 0, cmd, 0);
- WARN_ON(ret != 0);
- clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
- return ret;
-}
-
-static int __init vmwdt_probe(void)
-{
- /* there is no real way to see if the watchdog is supported,
- * so we try initializing it with a NOP command ("BEGIN")
- * that won't cause any harm even if the following disable
- * fails for some reason */
- char ebc_begin[] = {
- 194, 197, 199, 201, 213
- };
- if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
- return -EINVAL;
- return vmwdt_disable();
-}
-
-static int vmwdt_open(struct inode *i, struct file *f)
-{
- int ret;
- if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
- return -EBUSY;
- ret = vmwdt_keepalive();
- if (ret)
- clear_bit(VMWDT_OPEN, &vmwdt_is_open);
- return ret ? ret : nonseekable_open(i, f);
-}
-
-static int vmwdt_close(struct inode *i, struct file *f)
-{
- if (vmwdt_expect_close == 42)
- vmwdt_disable();
- vmwdt_expect_close = 0;
- clear_bit(VMWDT_OPEN, &vmwdt_is_open);
- return 0;
-}
-
-static struct watchdog_info vmwdt_info = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
- .firmware_version = 0,
- .identity = "z/VM Watchdog Timer",
-};
-
-static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user((void __user *)arg, &vmwdt_info,
- sizeof(vmwdt_info)))
- return -EFAULT;
- return 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int __user *)arg);
- case WDIOC_GETTEMP:
- return -EINVAL;
- case WDIOC_SETOPTIONS:
- {
- int options, ret;
- if (get_user(options, (int __user *)arg))
- return -EFAULT;
- ret = -EINVAL;
- if (options & WDIOS_DISABLECARD) {
- ret = vmwdt_disable();
- if (ret)
- return ret;
- }
- if (options & WDIOS_ENABLECARD) {
- ret = vmwdt_keepalive();
- }
- return ret;
- }
- case WDIOC_GETTIMEOUT:
- return put_user(vmwdt_interval, (int __user *)arg);
- case WDIOC_SETTIMEOUT:
- {
- int interval;
- if (get_user(interval, (int __user *)arg))
- return -EFAULT;
- if (interval < MIN_INTERVAL)
- return -EINVAL;
- vmwdt_interval = interval;
- }
- return vmwdt_keepalive();
- case WDIOC_KEEPALIVE:
- return vmwdt_keepalive();
- }
- return -EINVAL;
-}
-
-static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- int rc;
-
- mutex_lock(&vmwdt_mutex);
- rc = __vmwdt_ioctl(cmd, arg);
- mutex_unlock(&vmwdt_mutex);
- return (long) rc;
-}
-
-static ssize_t vmwdt_write(struct file *f, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if(count) {
- if (!vmwdt_nowayout) {
- size_t i;
-
- /* note: just in case someone wrote the magic character
- * five months ago... */
- vmwdt_expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf+i))
- return -EFAULT;
- if (c == 'V')
- vmwdt_expect_close = 42;
- }
- }
- /* someone wrote to us, we should restart timer */
- vmwdt_keepalive();
- }
- return count;
-}
-
-static int vmwdt_resume(void)
-{
- clear_bit(VMWDT_OPEN, &vmwdt_is_open);
- return NOTIFY_DONE;
-}
-
-/*
- * It makes no sense to go into suspend while the watchdog is running.
- * Depending on the memory size, the watchdog might trigger, while we
- * are still saving the memory.
- * We reuse the open flag to ensure that suspend and watchdog open are
- * exclusive operations
- */
-static int vmwdt_suspend(void)
-{
- if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
- pr_err("The system cannot be suspended while the watchdog"
- " is in use\n");
- return notifier_from_errno(-EBUSY);
- }
- if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
- clear_bit(VMWDT_OPEN, &vmwdt_is_open);
- pr_err("The system cannot be suspended while the watchdog"
- " is running\n");
- return notifier_from_errno(-EBUSY);
- }
- return NOTIFY_DONE;
-}
-
-/*
- * This function is called for suspend and resume.
- */
-static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- switch (event) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- return vmwdt_resume();
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- return vmwdt_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block vmwdt_power_notifier = {
- .notifier_call = vmwdt_power_event,
-};
-
-static const struct file_operations vmwdt_fops = {
- .open = &vmwdt_open,
- .release = &vmwdt_close,
- .unlocked_ioctl = &vmwdt_ioctl,
- .write = &vmwdt_write,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice vmwdt_dev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &vmwdt_fops,
-};
-
-static int __init vmwdt_init(void)
-{
- int ret;
-
- ret = vmwdt_probe();
- if (ret)
- return ret;
- ret = register_pm_notifier(&vmwdt_power_notifier);
- if (ret)
- return ret;
- /*
- * misc_register() has to be the last action in module_init(), because
- * file operations will be available right after this.
- */
- ret = misc_register(&vmwdt_dev);
- if (ret) {
- unregister_pm_notifier(&vmwdt_power_notifier);
- return ret;
- }
- return 0;
-}
-module_init(vmwdt_init);
-
-static void __exit vmwdt_exit(void)
-{
- unregister_pm_notifier(&vmwdt_power_notifier);
- misc_deregister(&vmwdt_dev);
-}
-module_exit(vmwdt_exit);
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 445564c790f6..00bfbee0af9e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -196,11 +196,11 @@ EXPORT_SYMBOL(airq_iv_release);
*/
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
- unsigned long bit, i;
+ unsigned long bit, i, flags;
if (!iv->avail || num == 0)
return -1UL;
- spin_lock(&iv->lock);
+ spin_lock_irqsave(&iv->lock, flags);
bit = find_first_bit_inv(iv->avail, iv->bits);
while (bit + num <= iv->bits) {
for (i = 1; i < num; i++)
@@ -218,9 +218,8 @@ unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
}
if (bit + num > iv->bits)
bit = -1UL;
- spin_unlock(&iv->lock);
+ spin_unlock_irqrestore(&iv->lock, flags);
return bit;
-
}
EXPORT_SYMBOL(airq_iv_alloc);
@@ -232,11 +231,11 @@ EXPORT_SYMBOL(airq_iv_alloc);
*/
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
- unsigned long i;
+ unsigned long i, flags;
if (!iv->avail || num == 0)
return;
- spin_lock(&iv->lock);
+ spin_lock_irqsave(&iv->lock, flags);
for (i = 0; i < num; i++) {
/* Clear (possibly left over) interrupt bit */
clear_bit_inv(bit + i, iv->vector);
@@ -248,7 +247,7 @@ void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
iv->end--;
}
- spin_unlock(&iv->lock);
+ spin_unlock_irqrestore(&iv->lock, flags);
}
EXPORT_SYMBOL(airq_iv_free);
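Plain spin_lock() is only safe if the lock is never taken from interrupt context; since these bit vectors are managed around adapter interrupts, a handler arriving on the same CPU while the lock is held would deadlock. The irqsave variants mask local interrupts and restore the previous state on unlock:

    unsigned long flags;

    spin_lock_irqsave(&iv->lock, flags);      /* also masks local IRQs */
    /* ... manipulate iv->avail / iv->vector ... */
    spin_unlock_irqrestore(&iv->lock, flags); /* restores prior IRQ state */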
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index dfd7bc681c25..e443b0d0b236 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -184,7 +184,7 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- int rc;
+ int rc = 0;
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
@@ -196,11 +196,12 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
if (device_remove_file_self(dev, attr))
ccwgroup_ungroup(gdev);
+ else
+ rc = -ENODEV;
out:
if (rc) {
- if (rc != -EAGAIN)
- /* Release onoff "lock" when ungrouping failed. */
- atomic_set(&gdev->onoff, 0);
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
@@ -227,6 +228,7 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work)
container_of(work, struct ccwgroup_device, ungroup_work);
ccwgroup_ungroup(gdev);
+ put_device(&gdev->dev);
}
static void ccwgroup_release(struct device *dev)
@@ -412,8 +414,10 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
{
struct ccwgroup_device *gdev = to_ccwgroupdev(data);
- if (action == BUS_NOTIFY_UNBIND_DRIVER)
+ if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+ get_device(&gdev->dev);
schedule_work(&gdev->ungroup_work);
+ }
return NOTIFY_OK;
}
@@ -582,11 +586,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
__ccwgroup_match_all))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- mutex_lock(&gdev->reg_mutex);
- __ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
- __ccwgroup_remove_cdev_refs(gdev);
- mutex_unlock(&gdev->reg_mutex);
+ ccwgroup_ungroup(gdev);
put_device(dev);
}
driver_unregister(&cdriver->driver);
@@ -633,13 +633,7 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
get_device(&gdev->dev);
spin_unlock_irq(cdev->ccwlock);
/* Unregister group device. */
- mutex_lock(&gdev->reg_mutex);
- if (device_is_registered(&gdev->dev)) {
- __ccwgroup_remove_symlinks(gdev);
- device_unregister(&gdev->dev);
- __ccwgroup_remove_cdev_refs(gdev);
- }
- mutex_unlock(&gdev->reg_mutex);
+ ccwgroup_ungroup(gdev);
/* Release ccwgroup device reference for local processing. */
put_device(&gdev->dev);
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 77f9c92df4b9..2905d8b0ec95 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -602,6 +602,7 @@ void __init init_cio_interrupts(void)
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
/*
* Use cio_tsch to update the subchannel status and call the interrupt handler
@@ -686,6 +687,7 @@ struct subchannel *cio_probe_console(void)
if (IS_ERR(sch))
return sch;
+ lockdep_set_class(sch->lock, &console_sch_key);
isc_register(CONSOLE_ISC);
sch->config.isc = CONSOLE_ISC;
sch->config.intparm = (u32)(addr_t)sch;
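The console subchannel's lock nests differently from ordinary subchannel locks, so sharing their lockdep class would produce false-positive warnings; lockdep_set_class() with a dedicated static key moves this one lock into its own class:

    static struct lock_class_key console_sch_key;

    /* track this lock separately from all other subchannel locks */
    lockdep_set_class(sch->lock, &console_sch_key);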
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d8d9b5b5cc56..dfef5e63cb7b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -678,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
NULL,
};
-/* this is a simple abstraction for device_register that sets the
- * correct bus type and adds the bus specific files */
-static int ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_add(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
- int ret;
dev->bus = &ccw_bus_type;
- ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- if (ret)
- return ret;
return device_add(dev);
}
@@ -764,22 +757,46 @@ static void ccw_device_todo(struct work_struct *work);
static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
- cdev->private->cdev = cdev;
- cdev->private->int_class = IRQIO_CIO;
- atomic_set(&cdev->private->onoff, 0);
+ struct ccw_device_private *priv = cdev->private;
+ int ret;
+
+ priv->cdev = cdev;
+ priv->int_class = IRQIO_CIO;
+ priv->state = DEV_STATE_NOT_OPER;
+ priv->dev_id.devno = sch->schib.pmcw.dev;
+ priv->dev_id.ssid = sch->schid.ssid;
+ priv->schid = sch->schid;
+
+ INIT_WORK(&priv->todo_work, ccw_device_todo);
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ init_timer(&priv->timer);
+
+ atomic_set(&priv->onoff, 0);
+ cdev->ccwlock = sch->lock;
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
- INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ goto out_put;
if (!get_device(&sch->dev)) {
- /* Release reference from device_initialize(). */
- put_device(&cdev->dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put;
}
- cdev->private->flags.initialized = 1;
+ priv->flags.initialized = 1;
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
return 0;
+
+out_put:
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ return ret;
}
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -858,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
/* make it known to the system */
- ret = ccw_device_register(cdev);
+ ret = ccw_device_add(cdev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
@@ -923,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev)
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
- struct ccw_device_private *priv;
-
- cdev->ccwlock = sch->lock;
-
- /* Init private data. */
- priv = cdev->private;
- priv->dev_id.devno = sch->schib.pmcw.dev;
- priv->dev_id.ssid = sch->schid.ssid;
- priv->schid = sch->schid;
- priv->state = DEV_STATE_NOT_OPER;
- INIT_LIST_HEAD(&priv->cmb_list);
- init_waitqueue_head(&priv->wait_q);
- init_timer(&priv->timer);
-
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
}
@@ -1083,7 +1085,7 @@ static int io_subchannel_probe(struct subchannel *sch)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev = sch_get_cdev(sch);
- rc = ccw_device_register(cdev);
+ rc = ccw_device_add(cdev);
if (rc) {
/* Release online reference. */
put_device(&cdev->dev);
@@ -1597,7 +1599,6 @@ int __init ccw_device_enable_console(struct ccw_device *cdev)
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
- sch_set_cdev(sch, cdev);
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
@@ -1639,6 +1640,7 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
put_device(&sch->dev);
return ERR_PTR(-ENOMEM);
}
+ set_io_private(sch, io_priv);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
put_device(&sch->dev);
@@ -1646,7 +1648,6 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
return cdev;
}
cdev->drv = drv;
- set_io_private(sch, io_priv);
ccw_device_set_int_class(cdev);
return cdev;
}
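The device.c rework consolidates all of the private-data setup into io_subchannel_initialize_dev() and names the device during initialization. The ordering rule it follows: once device_initialize() has run, every error path must exit through put_device() rather than kfree(), so the object is freed via its release callback. A sketch, with ssid/devno standing in for the private dev_id fields:

    device_initialize(&cdev->dev);
    ret = dev_set_name(&cdev->dev, "0.%x.%04x", ssid, devno);
    if (ret)
            goto out_put;
    /* ... */
    return 0;

    out_put:
    put_device(&cdev->dev);  /* frees via the release callback */
    return ret;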
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 4221b02085ad..f1f3baa8e6e4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
#define QDIO_DEBUGFS_NAME_LEN 10
+#define QDIO_DBF_NAME_LEN 20
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+struct qdio_dbf_entry {
+ char dbf_name[QDIO_DBF_NAME_LEN];
+ debug_info_t *dbf_info;
+ struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+ struct qdio_dbf_entry *entry;
+ debug_info_t *rc = NULL;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+ if (strcmp(entry->dbf_name, name) == 0) {
+ rc = entry->dbf_info;
+ break;
+ }
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+ return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+ struct qdio_dbf_entry *entry, *tmp;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+ list_del(&entry->dbf_list);
+ debug_unregister(entry->dbf_info);
+ kfree(entry);
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr)
{
- char text[20];
+ char text[QDIO_DBF_NAME_LEN];
+ struct qdio_dbf_entry *new_entry;
DBF_EVENT("qfmt:%1d", init_data->q_format);
DBF_HEX(init_data->adapter_name, 8);
@@ -38,11 +79,34 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
/* allocate trace view for the interface */
- snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
- irq_ptr->debug_area = debug_register(text, 2, 1, 16);
- debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
- debug_set_level(irq_ptr->debug_area, DBF_WARN);
- DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+ dev_name(&init_data->cdev->dev));
+ irq_ptr->debug_area = qdio_get_dbf_entry(text);
+ if (irq_ptr->debug_area)
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+ else {
+ irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+ if (!irq_ptr->debug_area)
+ return -ENOMEM;
+ if (debug_register_view(irq_ptr->debug_area,
+ &debug_hex_ascii_view)) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ debug_set_level(irq_ptr->debug_area, DBF_WARN);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+ if (!new_entry) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ new_entry->dbf_info = irq_ptr->debug_area;
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_add(&new_entry->dbf_list, &qdio_dbf_list);
+ mutex_unlock(&qdio_dbf_list_mutex);
+ }
+ return 0;
}
static int qstat_show(struct seq_file *m, void *v)
@@ -300,6 +364,7 @@ int __init qdio_debug_init(void)
void qdio_debug_exit(void)
{
+ qdio_clear_dbf_list();
debugfs_remove(debugfs_root);
if (qdio_dbf_setup)
debug_unregister(qdio_dbf_setup);
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index dfac9bfefea3..f33ce8577619 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -75,7 +75,7 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
}
}
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 77466c4faabb..848e3b64ea6e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -409,17 +409,16 @@ static inline void qdio_stop_polling(struct qdio_q *q)
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
-static inline void account_sbals(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
- int pos = 0;
+ int pos;
q->q_stats.nr_sbal_total += count;
if (count == QDIO_MAX_BUFFERS_MASK) {
q->q_stats.nr_sbals[7]++;
return;
}
- while (count >>= 1)
- pos++;
+ pos = ilog2(count);
q->q_stats.nr_sbals[pos]++;
}
@@ -1234,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
return -ENODEV;
DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
- if (irq_ptr->debug_area != NULL) {
- debug_unregister(irq_ptr->debug_area);
- irq_ptr->debug_area = NULL;
- }
+ irq_ptr->debug_area = NULL;
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1276,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
goto out_err;
mutex_init(&irq_ptr->setup_mutex);
- qdio_allocate_dbf(init_data, irq_ptr);
+ if (qdio_allocate_dbf(init_data, irq_ptr))
+ goto out_rel;
/*
* Allocate a page for the chsc calls in qdio_establish.
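
With qdio_allocate_dbf() now returning int, qdio_allocate() has to unwind whatever it already set up when the call fails, hence the goto out_rel. The surrounding idiom, reduced to a sketch with hypothetical setup_a/setup_b/teardown_a helpers:

    static int setup_a(void);
    static int setup_b(void);        /* stands in for qdio_allocate_dbf() */
    static void teardown_a(void);

    static int example_allocate(void)
    {
            int rc;

            rc = setup_a();
            if (rc)
                    goto out;
            rc = setup_b();
            if (rc)
                    goto out_rel;    /* undo setup_a() only */
            return 0;
    out_rel:
            teardown_a();
    out:
            return rc;
    }
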
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8eec1653c9cc..69ef4f8cfac8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -77,12 +77,12 @@ MODULE_ALIAS("z90crypt");
* Module parameter
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
-module_param_named(domain, ap_domain_index, int, 0000);
+module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);
static int ap_thread_flag = 0;
-module_param_named(poll_thread, ap_thread_flag, int, 0000);
+module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
static struct device *ap_root_device = NULL;
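
The ap_bus change swaps the magic 0000 permission for S_IRUSR|S_IRGRP (0440): a mode of 0 hides a module parameter from sysfs entirely, while any read bit exposes its current value under /sys/module/<name>/parameters/. Illustration with a hypothetical parameter:

    #include <linux/module.h>
    #include <linux/stat.h>

    static int example_flag;
    /* 0440: owner and group may read the live value via sysfs, nobody
     * can write it at runtime; with mode 0 the file would not exist.
     */
    module_param(example_flag, int, S_IRUSR | S_IRGRP);
    MODULE_PARM_DESC(example_flag, "readable but not writable via sysfs");
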
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5222ebe15705..0e18c5dcd91f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -356,7 +356,7 @@ struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
zops = __ops_lookup(name, variant);
if (!zops) {
- request_module(name);
+ request_module("%s", name);
zops = __ops_lookup(name, variant);
}
if ((!zops) || (!try_module_get(zops->owner)))
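
The zcrypt change closes a format-string hazard: request_module() is printf-like, so passing a non-literal string as the format would let any stray '%' in it be interpreted as a conversion. Even where the name is driver-constructed, pinning the format with "%s" costs nothing and keeps the string inert:

    #include <linux/kmod.h>

    static void load_ops_module(const char *name)
    {
            /* Never feed variable data to a printf-style API as the
             * format itself; with "%s" any '%' in name stays literal.
             */
            request_module("%s", name);
    }
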
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 1e1fc671f89a..d2c0b442bce5 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
+#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
@@ -62,6 +63,7 @@ struct virtio_ccw_device {
struct vq_config_block *config_block;
bool is_thinint;
bool going_away;
+ bool device_lost;
void *airq_info;
};
@@ -1010,11 +1012,14 @@ static void virtio_ccw_remove(struct ccw_device *cdev)
unsigned long flags;
struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
- if (vcdev && cdev->online)
+ if (vcdev && cdev->online) {
+ if (vcdev->device_lost)
+ virtio_break_device(&vcdev->vdev);
unregister_virtio_device(&vcdev->vdev);
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
- dev_set_drvdata(&cdev->dev, NULL);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ }
cdev->handler = NULL;
}
@@ -1023,12 +1028,14 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
unsigned long flags;
struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
- if (vcdev) {
- unregister_virtio_device(&vcdev->vdev);
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
- dev_set_drvdata(&cdev->dev, NULL);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
- }
+ if (!vcdev)
+ return 0;
+ if (vcdev->device_lost)
+ virtio_break_device(&vcdev->vdev);
+ unregister_virtio_device(&vcdev->vdev);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return 0;
}
@@ -1096,8 +1103,26 @@ out_free:
static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
- /* TODO: Check whether we need special handling here. */
- return 0;
+ int rc;
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ /*
+ * Make sure vcdev is set
+ * i.e. set_offline/remove callback not already running
+ */
+ if (!vcdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case CIO_GONE:
+ vcdev->device_lost = true;
+ rc = NOTIFY_DONE;
+ break;
+ default:
+ rc = NOTIFY_DONE;
+ break;
+ }
+ return rc;
}
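
The virtio_ccw changes split detection from teardown: the CIO notifier only records CIO_GONE in vcdev->device_lost, and the later remove/offline paths call virtio_break_device() so outstanding virtqueue operations fail immediately instead of waiting on hardware that has vanished. The shape of the pattern, with a hypothetical my_dev/my_teardown pair (the virtio calls are the real API):

    #include <linux/notifier.h>
    #include <linux/virtio.h>

    struct my_dev {
            struct virtio_device vdev;
            bool device_lost;
    };

    /* Notifier context: just record the event, defer the heavy lifting. */
    static int my_notify(struct my_dev *dev, int event, int gone_event)
    {
            if (event == gone_event)
                    dev->device_lost = true;
            return NOTIFY_DONE;
    }

    /* Regular teardown path: act on the recorded state. */
    static void my_teardown(struct my_dev *dev)
    {
            if (dev->device_lost)
                    virtio_break_device(&dev->vdev);  /* fail pending I/O */
            unregister_virtio_device(&dev->vdev);
    }
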
static struct ccw_device_id virtio_ids[] = {
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index fd7b3bd80789..d837c3c5330f 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3348,7 +3348,7 @@ static int __init claw_init(void)
}
CLAW_DBF_TEXT(2, setup, "init_mod");
claw_root_dev = root_device_register("claw");
- ret = PTR_RET(claw_root_dev);
+ ret = PTR_ERR_OR_ZERO(claw_root_dev);
if (ret)
goto register_err;
ret = ccw_driver_register(&claw_ccw_driver);
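
PTR_RET() was only ever an alias and has since been renamed; PTR_ERR_OR_ZERO(ptr) reads as what it does — PTR_ERR(ptr) when IS_ERR(ptr), 0 otherwise. The same substitution repeats below for ctcm, lcs and qeth. Typical use, with a hypothetical device name:

    #include <linux/device.h>
    #include <linux/err.h>

    static int register_example_root(struct device **out)
    {
            struct device *dev = root_device_register("example");
            int ret = PTR_ERR_OR_ZERO(dev);  /* -Exx on ERR_PTR, else 0 */

            if (ret)
                    return ret;
            *out = dev;
            return 0;
    }
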
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 70b3a023100e..03b6ad035577 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1837,7 +1837,7 @@ static int __init ctcm_init(void)
if (ret)
goto out_err;
ctcm_root_dev = root_device_register("ctcm");
- ret = PTR_RET(ctcm_root_dev);
+ ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
if (ret)
goto register_err;
ret = ccw_driver_register(&ctcm_ccw_driver);
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 985b5dcbdac8..6bcfbbb20f04 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -34,8 +34,9 @@ static ssize_t ctcm_buffer_write(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct net_device *ndev;
- int bs1;
+ unsigned int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev);
+ int rc;
ndev = priv->channel[CTCM_READ]->netdev;
if (!(priv && priv->channel[CTCM_READ] && ndev)) {
@@ -43,7 +44,9 @@ static ssize_t ctcm_buffer_write(struct device *dev,
return -ENODEV;
}
- sscanf(buf, "%u", &bs1);
+ rc = sscanf(buf, "%u", &bs1);
+ if (rc != 1)
+ goto einval;
if (bs1 > CTCM_BUFSIZE_LIMIT)
goto einval;
if (bs1 < (576 + LL_HEADER_LENGTH + 2))
@@ -143,13 +146,14 @@ static ssize_t ctcm_proto_show(struct device *dev,
static ssize_t ctcm_proto_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- int value;
+ int value, rc;
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
- sscanf(buf, "%u", &value);
- if (!((value == CTCM_PROTO_S390) ||
+ rc = sscanf(buf, "%d", &value);
+ if ((rc != 1) ||
+ !((value == CTCM_PROTO_S390) ||
(value == CTCM_PROTO_LINUX) ||
(value == CTCM_PROTO_MPC) ||
(value == CTCM_PROTO_OS390)))
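
Both ctcm store handlers now refuse input unless sscanf() matched exactly one field, and the buffer handler additionally fixes the old mismatch of scanning "%u" into a signed int; the lcs store handlers below get the same treatment. For a single integer, kstrtouint() (available since 2.6.39) is the more usual idiom, since it also rejects trailing junk and out-of-range values — a hedged sketch with a made-up limit:

    #include <linux/kernel.h>

    static ssize_t buffer_store(const char *buf, size_t count)
    {
            unsigned int bs1;
            int rc = kstrtouint(buf, 0, &bs1); /* -EINVAL/-ERANGE on bad input */

            if (rc)
                    return rc;
            if (bs1 > 65535)                   /* hypothetical driver limit */
                    return -EINVAL;
            return count;
    }
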
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c461f2aac610..0a7d87c372b8 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1943,14 +1943,16 @@ static ssize_t
lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct lcs_card *card;
- int value;
+ int value, rc;
card = dev_get_drvdata(dev);
if (!card)
return 0;
- sscanf(buf, "%u", &value);
+ rc = sscanf(buf, "%d", &value);
+ if (rc != 1)
+ return -EINVAL;
/* TODO: sanity checks */
card->portno = value;
@@ -1997,14 +1999,17 @@ static ssize_t
lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct lcs_card *card;
- int value;
+ unsigned int value;
+ int rc;
card = dev_get_drvdata(dev);
if (!card)
return 0;
- sscanf(buf, "%u", &value);
+ rc = sscanf(buf, "%u", &value);
+ if (rc != 1)
+ return -EINVAL;
/* TODO: sanity checks */
card->lancmd_timeout = value;
@@ -2442,7 +2447,7 @@ __init lcs_init_module(void)
if (rc)
goto out_err;
lcs_root_dev = root_device_register("lcs");
- rc = PTR_RET(lcs_root_dev);
+ rc = PTR_ERR_OR_ZERO(lcs_root_dev);
if (rc)
goto register_err;
rc = ccw_driver_register(&lcs_ccw_driver);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 5333b2c018e7..a2088af51cc5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -268,10 +268,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_NO_PRIO_QUEUEING 0
#define QETH_PRIO_Q_ING_PREC 1
#define QETH_PRIO_Q_ING_TOS 2
-#define IP_TOS_LOWDELAY 0x10
-#define IP_TOS_HIGHTHROUGHPUT 0x08
-#define IP_TOS_HIGHRELIABILITY 0x04
-#define IP_TOS_NOTIMPORTANT 0x02
+#define QETH_PRIO_Q_ING_SKB 3
+#define QETH_PRIO_Q_ING_VLAN 4
/* Packing */
#define QETH_LOW_WATERMARK_PACK 2
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e89f38c31176..f54bec54d677 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/iucv/af_iucv.h>
+#include <net/dsfield.h>
#include <asm/ebcdic.h>
#include <asm/chpid.h>
@@ -1013,7 +1014,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
card = CARD_FROM_CDEV(cdev);
- if (!IS_ERR(irb))
+ if (!card || !IS_ERR(irb))
return 0;
switch (PTR_ERR(irb)) {
@@ -1029,7 +1030,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
if (intparm == QETH_RCD_PARM) {
- if (card && (card->data.ccwdev == cdev)) {
+ if (card->data.ccwdev == cdev) {
card->data.state = CH_STATE_DOWN;
wake_up(&card->wait_q);
}
@@ -3662,42 +3663,56 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+/**
+ * Note: Function assumes that we have 4 outbound queues.
+ */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
- if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX))
- return card->qdio.default_out_queue;
- switch (card->qdio.no_out_queues) {
- case 4:
- if (cast_type && card->info.is_multicast_different)
- return card->info.is_multicast_different &
- (card->qdio.no_out_queues - 1);
- if (card->qdio.do_prio_queueing && (ipv == 4)) {
- const u8 tos = ip_hdr(skb)->tos;
-
- if (card->qdio.do_prio_queueing ==
- QETH_PRIO_Q_ING_TOS) {
- if (tos & IP_TOS_NOTIMPORTANT)
- return 3;
- if (tos & IP_TOS_HIGHRELIABILITY)
- return 2;
- if (tos & IP_TOS_HIGHTHROUGHPUT)
- return 1;
- if (tos & IP_TOS_LOWDELAY)
- return 0;
- }
- if (card->qdio.do_prio_queueing ==
- QETH_PRIO_Q_ING_PREC)
- return 3 - (tos >> 6);
- } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
- /* TODO: IPv6!!! */
+ __be16 *tci;
+ u8 tos;
+
+ if (cast_type && card->info.is_multicast_different)
+ return card->info.is_multicast_different &
+ (card->qdio.no_out_queues - 1);
+
+ switch (card->qdio.do_prio_queueing) {
+ case QETH_PRIO_Q_ING_TOS:
+ case QETH_PRIO_Q_ING_PREC:
+ switch (ipv) {
+ case 4:
+ tos = ipv4_get_dsfield(ip_hdr(skb));
+ break;
+ case 6:
+ tos = ipv6_get_dsfield(ipv6_hdr(skb));
+ break;
+ default:
+ return card->qdio.default_out_queue;
}
- return card->qdio.default_out_queue;
- case 1: /* fallthrough for single-out-queue 1920-device */
+ if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
+ return ~tos >> 6 & 3;
+ if (tos & IPTOS_MINCOST)
+ return 3;
+ if (tos & IPTOS_RELIABILITY)
+ return 2;
+ if (tos & IPTOS_THROUGHPUT)
+ return 1;
+ if (tos & IPTOS_LOWDELAY)
+ return 0;
+ break;
+ case QETH_PRIO_Q_ING_SKB:
+ if (skb->priority > 5)
+ return 0;
+ return ~skb->priority >> 1 & 3;
+ case QETH_PRIO_Q_ING_VLAN:
+ tci = &((struct ethhdr *)skb->data)->h_proto;
+ if (*tci == ETH_P_8021Q)
+ return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
+ break;
default:
- return card->qdio.default_out_queue;
+ break;
}
+ return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
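
The rewrite centralizes queue selection on the four outbound queues the new comment documents as assumed, reads the TOS/DS byte via ipv4_get_dsfield()/ipv6_get_dsfield() so IPv6 finally gets priority queueing too, and adds two new modes keyed off skb->priority and the VLAN PCP bits. (On s390, a big-endian machine, comparing the raw h_proto field against ETH_P_8021Q without htons() still works, since network order equals host order there.) The inverted-shift expressions are terse; worked evaluations of them, queue 0 being the highest priority:

    /*
     * Precedence mode maps the top two TOS bits via  ~tos >> 6 & 3
     * (arithmetic shift of the inverted value, then masked):
     *   tos = 0x00 (routine)  ->  ~0x00 >> 6 & 3 = 3   (last queue)
     *   tos = 0xc0 (highest)  ->  ~0xc0 >> 6 & 3 = 0   (first queue)
     *
     * skb mode: priority > 5 -> queue 0, else  ~skb->priority >> 1 & 3:
     *   priority 0,1 -> 3;   2,3 -> 2;   4,5 -> 1;   6 and up -> 0
     */
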
@@ -5703,6 +5718,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
struct carrier_info carrier_info;
+ u32 speed;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
link_type = QETH_LINK_TYPE_10GBIT_ETH;
@@ -5717,28 +5733,29 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP);
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
ecmd->port = PORT_TP;
break;
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH1000:
qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_10GBIT_ETH:
qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
- ecmd->speed = SPEED_10000;
+ speed = SPEED_10000;
ecmd->port = PORT_FIBRE;
break;
default:
qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP);
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
ecmd->port = PORT_TP;
}
+ ethtool_cmd_speed_set(ecmd, speed);
/* Check if we can obtain more accurate information. */
/* If QUERY_CARD_INFO command is not supported or fails, */
@@ -5783,18 +5800,19 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
switch (carrier_info.port_speed) {
case CARD_INFO_PORTS_10M:
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
break;
case CARD_INFO_PORTS_100M:
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
break;
case CARD_INFO_PORTS_1G:
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
break;
case CARD_INFO_PORTS_10G:
- ecmd->speed = SPEED_10000;
+ speed = SPEED_10000;
break;
}
+ ethtool_cmd_speed_set(ecmd, speed);
return 0;
}
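
ethtool_cmd_speed_set() exists because struct ethtool_cmd splits the link speed across a 16-bit speed field and a later speed_hi field; assigning ecmd->speed directly silently truncates anything above 65535 Mb/s. The helper hides the split:

    #include <linux/ethtool.h>

    /* Stores the low 16 bits in ecmd->speed and the rest in
     * ecmd->speed_hi, so e.g. a 100000 Mb/s link is reported intact.
     */
    static void report_speed(struct ethtool_cmd *ecmd, u32 speed)
    {
            ethtool_cmd_speed_set(ecmd, speed);
    }
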
@@ -5816,7 +5834,7 @@ static int __init qeth_core_init(void)
if (rc)
goto out_err;
qeth_core_root_dev = root_device_register("qeth");
- rc = PTR_RET(qeth_core_root_dev);
+ rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
goto register_err;
qeth_core_header_cache = kmem_cache_create("qeth_hdr",
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 425c0ecf1f3b..8a25a2be9890 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -217,6 +217,10 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
return sprintf(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
return sprintf(buf, "%s\n", "by type of service");
+ case QETH_PRIO_Q_ING_SKB:
+ return sprintf(buf, "%s\n", "by skb-priority");
+ case QETH_PRIO_Q_ING_VLAN:
+ return sprintf(buf, "%s\n", "by VLAN headers");
default:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
@@ -250,11 +254,23 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
}
tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "prio_queueing_prec"))
+ if (!strcmp(tmp, "prio_queueing_prec")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
- else if (!strcmp(tmp, "prio_queueing_tos"))
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "prio_queueing_skb")) {
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "prio_queueing_tos")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
- else if (!strcmp(tmp, "no_prio_queueing:0")) {
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "prio_queueing_vlan")) {
+ if (!card->options.layer2) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ } else if (!strcmp(tmp, "no_prio_queueing:0")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 0;
} else if (!strcmp(tmp, "no_prio_queueing:1")) {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8dea3f12ccc1..5ef5b4f45758 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -725,15 +725,20 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int elements = 0;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = skb;
- int ipv = qeth_get_ip_version(skb);
int cast_type = qeth_l2_get_cast_type(card, skb);
- struct qeth_qdio_out_q *queue = card->qdio.out_qs
- [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+ struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
+ if (card->qdio.do_prio_queueing || (cast_type &&
+ card->info.is_multicast_different))
+ queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
+ qeth_get_ip_version(skb), cast_type)];
+ else
+ queue = card->qdio.out_qs[card->qdio.default_out_queue];
+
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
card->stats.tx_carrier_errors++;
goto tx_drop;
@@ -964,10 +969,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
- if (card->info.type != QETH_CARD_TYPE_OSN)
- SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
- else
- SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
+ card->dev->ethtool_ops =
+ (card->info.type != QETH_CARD_TYPE_OSN) ?
+ &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
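
Two independent cleanups meet in this qeth_l2 section: the transmit path now computes the comparatively expensive priority queue only when priority queueing or multicast steering is actually configured, falling back to default_out_queue otherwise; and SET_ETHTOOL_OPS() — removed from the tree in this very release — gives way to a plain assignment, which works because net_device.ethtool_ops is just a const ops pointer:

    #include <linux/netdevice.h>

    /* my_ethtool_ops is a hypothetical ops table; since v3.16 this plain
     * assignment replaces SET_ETHTOOL_OPS(dev, &my_ethtool_ops).
     */
    static void setup_ethtool(struct net_device *dev,
                              const struct ethtool_ops *my_ethtool_ops)
    {
            dev->ethtool_ops = my_ethtool_ops;
    }
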
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3524d34ff694..14e0b5810e8c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -63,7 +63,7 @@ void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
{
int count = 0, rc = 0;
- int in[4];
+ unsigned int in[4];
char c;
rc = sscanf(buf, "%u.%u.%u.%u%c",
@@ -1659,7 +1659,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
struct net_device *netdev;
- netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+ netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
vid);
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
@@ -1721,7 +1721,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
struct net_device *netdev;
- netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+ netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
vid);
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
@@ -1766,7 +1766,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "frvaddr4");
- netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+ netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
if (!netdev)
return;
in_dev = in_dev_get(netdev);
@@ -1796,7 +1796,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "frvaddr6");
- netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+ netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
if (!netdev)
return;
in6_dev = in6_dev_get(netdev);
@@ -2089,7 +2089,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
struct net_device *netdev;
rcu_read_lock();
- netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+ netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
vid);
rcu_read_unlock();
if (netdev == dev) {
@@ -2926,8 +2926,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
int cast_type = qeth_l3_get_cast_type(card, skb);
- struct qeth_qdio_out_q *queue = card->qdio.out_qs
- [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+ struct qeth_qdio_out_q *queue =
+ card->qdio.out_qs[card->qdio.do_prio_queueing
+ || (cast_type && card->info.is_multicast_different) ?
+ qeth_get_priority_queue(card, skb, ipv, cast_type) :
+ card->qdio.default_out_queue];
int tx_bytes = skb->len;
bool large_send;
int data_offset = -1;
@@ -3298,7 +3301,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->ml_priv = card;
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
- SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
+ card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
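
All the qeth_l3 call sites switch to __vlan_find_dev_deep_rcu(); the rename encodes the locking contract in the name — the caller must hold rcu_read_lock(), as qeth_l3_verify_vlan_dev() above does explicitly. A self-contained usage sketch (vlan_dev_is_up is hypothetical):

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    static bool vlan_dev_is_up(struct net_device *real_dev, u16 vid)
    {
            struct net_device *vlan;
            bool up;

            rcu_read_lock();        /* required by the _rcu variant */
            vlan = __vlan_find_dev_deep_rcu(real_dev,
                                            htons(ETH_P_8021Q), vid);
            up = vlan && (vlan->flags & IFF_UP);
            rcu_read_unlock();      /* vlan must not be used past here */
            return up;
    }
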
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 02832d64d918..baca5897039f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1773,6 +1773,7 @@ config SCSI_BFA_FC
config SCSI_VIRTIO
tristate "virtio-scsi support"
depends on VIRTIO
+ select BLK_DEV_INTEGRITY
help
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index bcd223868227..93d13fc9a293 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -27,8 +27,6 @@
*/
/*
- * $Log: NCR5380.c,v $
-
* Revision 1.10 1998/9/2 Alan Cox
* (alan@lxorguk.ukuu.org.uk)
* Fixed up the timer lockups reported so far. Things still suck. Looking
@@ -89,13 +87,6 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
-#ifndef NDEBUG
-#define NDEBUG 0
-#endif
-#ifndef NDEBUG_ABORT
-#define NDEBUG_ABORT 0
-#endif
-
#if (NDEBUG & NDEBUG_LISTS)
#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
LIST(cmd, tmp);
tmp->host_scribble = (unsigned char *) cmd;
}
- dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
+ dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
/* Run the coroutine if it isn't already running. */
/* Kick off command processing */
@@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work)
/* Lock held here */
done = 1;
if (!hostdata->connected && !hostdata->selecting) {
- dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no));
+ dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no);
/*
* Search through the issue_queue for a command destined
* for a target that's not busy.
@@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work)
for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
{
if (prev != tmp)
- dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun));
+ dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
/* When we find one, remove it from the issue queue. */
if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {
if (prev) {
@@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work)
* On failure, we must add the command back to the
* issue queue so we can keep trying.
*/
- dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun));
+ dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
/*
* A successful selection is defined as one that
@@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)
tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
hostdata->issue_queue = tmp;
done = 0;
- dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no));
+ dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no);
}
/* lock held here still */
} /* if target/lun is not busy */
@@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work)
#endif
&& (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))
) {
- dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no));
+ dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no);
NCR5380_information_transfer(instance);
- dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no));
+ dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no);
done = 0;
} else
break;
@@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
unsigned char basr;
unsigned long flags;
- dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
- instance->irq));
+ dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n",
+ instance->irq);
do {
done = 1;
@@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
NCR5380_dprint(NDEBUG_INTR, instance);
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
done = 0;
- dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no));
+ dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no);
NCR5380_reselect(instance);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else if (basr & BASR_PARITY_ERROR) {
- dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no));
+ dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
- dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no));
+ dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else {
#if defined(REAL_DMA)
@@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
}
#else
- dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)));
+ dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#endif
}
@@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
hostdata->restart_select = 0;
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
- dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id));
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id);
/*
* Set the phase bits to 0, otherwise the NCR5380 won't drive the
@@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
goto failed;
}
- dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no));
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no);
/*
* The arbitration delay is 2.2us, but this is a minimum and there is
@@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
/* Check for lost arbitration */
if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
NCR5380_write(MODE_REG, MR_BASE);
- dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no));
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no);
goto failed;
}
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
@@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no));
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no);
goto failed;
}
/*
@@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
udelay(2);
- dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no));
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no);
/*
* Now that we have won arbitration, start Selection process, asserting
@@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
udelay(1);
- dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)));
+ dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd));
/*
* The SCSI specification calls for a 250 ms timeout for the actual
@@ -1487,7 +1478,7 @@ part2:
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no));
+ dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return 0;
}
@@ -1520,7 +1511,7 @@ part2:
goto failed;
}
- dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id));
+ dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun);
len = 1;
@@ -1530,7 +1521,7 @@ part2:
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no));
+ dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);
/* XXX need to handle errors here */
hostdata->connected = cmd;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
@@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
NCR5380_setup(instance);
if (!(p & SR_IO))
- dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c));
+ dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c);
else
- dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c));
+ dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c);
/*
* The NCR5380 chip will only drive the SCSI bus when the
@@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
break;
}
- dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no));
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no);
/* Check for phase mismatch */
if ((tmp & PHASE_MASK) != p) {
- dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no));
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no);
NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);
break;
}
@@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
/* FIXME - if this fails bus reset ?? */
NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ);
- dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no));
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no);
/*
* We have several special cases to consider during REQ/ACK handshaking :
@@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
}
} while (--c);
- dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c));
+ dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c);
*count = c;
*data = d;
@@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
c -= 2;
}
#endif
- dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d));
+ dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
#endif
@@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
#endif /* def REAL_DMA */
- dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)));
+ dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
/*
* On the PAS16 at least I/O recovery delays are not needed here.
@@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
}
}
- dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)));
+ dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG));
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
@@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
#ifdef READ_OVERRUNS
if (*phase == p && (p & SR_IO) && residue == 0) {
if (overrun) {
- dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n"));
+ dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
**data = saved_data;
*data += 1;
*count -= 1;
@@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
printk("No overrun??\n");
cnt = toPIO = 2;
}
- dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data));
+ dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
NCR5380_transfer_pio(instance, phase, &cnt, data);
*count -= toPIO - cnt;
}
#endif
- dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)));
+ dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
return 0;
#elif defined(REAL_DMA)
@@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
foo = NCR5380_pwrite(instance, d, c);
#else
int timeout;
- dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c));
+ dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c);
if (!(foo = NCR5380_pwrite(instance, d, c))) {
/*
* Wait for the last byte to be sent. If REQ is being asserted for
@@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));
if (!timeout)
- dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no));
+ dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no);
if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
- dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no));
+ dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no);
}
}
} else {
- dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n"));
+ dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n");
while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
- dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n"));
+ dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n");
}
}
#endif
@@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
- dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n"));
+ dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n");
if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
- dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n"));
+ dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n");
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else {
printk("53C400w: IRQ NOT THERE!\n");
@@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual);
}
/*
* The preferred transfer method is going to be
@@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
case LINKED_FLG_CMD_COMPLETE:
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun));
+ dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
/*
* Sanity check : A linked command should only terminate with
* one of these messages if there are more linked commands
@@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
/* The next command is still part of this process */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun));
+ dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
cmd = hostdata->connected;
@@ -2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
sink = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = NULL;
- dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun));
+ dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
/*
@@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
- dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no));
+ dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no);
LIST(cmd, hostdata->issue_queue);
cmd->host_scribble = (unsigned char *)
hostdata->issue_queue;
hostdata->issue_queue = (Scsi_Cmnd *) cmd;
- dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no));
+ dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
} else
#endif /* def AUTOSENSE */
{
@@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
hostdata->disconnected_queue;
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
- dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun));
+ dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
/*
* Restore phase bits to 0 so an interrupted selection,
* arbitration can resume.
@@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
extended_msg[0] = EXTENDED_MESSAGE;
/* Accept first byte by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no));
+ dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no);
len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]));
+ dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]);
if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
/* Accept third byte by clearing ACK */
@@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len));
+ dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len);
switch (extended_msg[2]) {
case EXTENDED_SDTR:
@@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
- dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
+ dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
return;
}
break;
@@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
break;
default:
printk("scsi%d : unknown phase\n", instance->host_no);
- NCR5380_dprint(NDEBUG_ALL, instance);
+ NCR5380_dprint(NDEBUG_ANY, instance);
} /* switch(phase) */
} /* if (tmp * SR_REQ) */
else {
@@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
*/
if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
- dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
+ dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
return;
}
}
@@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
hostdata->restart_select = 1;
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
- dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no));
+ dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no);
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
do_abort(instance);
} else {
hostdata->connected = tmp;
- dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag));
+ dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
}
}
@@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
NCR5380_setup(instance);
- dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no));
- dprintk(NDEBUG_ABORT, (" basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)));
+ dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no);
+ dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
#if 0
/*
@@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
*/
if (hostdata->connected == cmd) {
- dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no));
+ dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no);
hostdata->aborted = 1;
/*
* We should perform BSY checking, and make sure we haven't slipped
@@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
* from the issue queue.
*/
- dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no));
+ dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
if (cmd == tmp) {
REMOVE(5, *prev, tmp, tmp->host_scribble);
(*prev) = (Scsi_Cmnd *) tmp->host_scribble;
tmp->host_scribble = NULL;
tmp->result = DID_ABORT << 16;
- dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no));
+ dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
tmp->scsi_done(tmp);
return SUCCESS;
}
@@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
*/
if (hostdata->connected) {
- dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no));
+ dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no);
return FAILED;
}
/*
@@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
if (cmd == tmp) {
- dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no));
+ dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
if (NCR5380_select(instance, cmd, (int) cmd->tag))
return FAILED;
- dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no));
+ dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
do_abort(instance);
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 14964d0a0e9d..c79ddfa6f53c 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -21,10 +21,6 @@
* 1+ (800) 334-5454
*/
-/*
- * $Log: NCR5380.h,v $
- */
-
#ifndef NCR5380_H
#define NCR5380_H
@@ -60,6 +56,9 @@
#define NDEBUG_C400_PREAD 0x100000
#define NDEBUG_C400_PWRITE 0x200000
#define NDEBUG_LISTS 0x400000
+#define NDEBUG_ABORT 0x800000
+#define NDEBUG_TAGS 0x1000000
+#define NDEBUG_MERGING 0x2000000
#define NDEBUG_ANY 0xFFFFFFFFUL
@@ -292,9 +291,24 @@ struct NCR5380_hostdata {
#ifdef __KERNEL__
-#define dprintk(a,b) do {} while(0)
-#define NCR5380_dprint(a,b) do {} while(0)
-#define NCR5380_dprint_phase(a,b) do {} while(0)
+#ifndef NDEBUG
+#define NDEBUG (0)
+#endif
+
+#define dprintk(flg, fmt, ...) \
+ do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0)
+
+#if NDEBUG
+#define NCR5380_dprint(flg, arg) \
+ do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)
+#define NCR5380_dprint_phase(flg, arg) \
+ do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0)
+static void NCR5380_print_phase(struct Scsi_Host *instance);
+static void NCR5380_print(struct Scsi_Host *instance);
+#else
+#define NCR5380_dprint(flg, arg) do {} while (0)
+#define NCR5380_dprint_phase(flg, arg) do {} while (0)
+#endif
#if defined(AUTOPROBE_IRQ)
static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
@@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
#endif
static void NCR5380_main(struct work_struct *work);
static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
-#ifdef NDEBUG
-static void NCR5380_print_phase(struct Scsi_Host *instance);
-static void NCR5380_print(struct Scsi_Host *instance);
-#endif
static int NCR5380_abort(Scsi_Cmnd * cmd);
static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
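
This header hunk is what makes all the NCR5380.c call-site churn above mechanical: the old dprintk took a single parenthesized argument pack — dprintk(flag, (fmt, args)) — and expanded to nothing, so every caller needed its inner parentheses removed once the macro became real. The new variadic form always compiles, the (NDEBUG) & (flg) test folds to zero at build time when the flag is masked out, and the ## before __VA_ARGS__ swallows the trailing comma for argument-less calls. In isolation, with hypothetical mask and flag values:

    #include <linux/printk.h>

    #define MY_DEBUG_MASK  0x3      /* compile-time category selection */
    #define MY_DBG_IO      0x1
    #define MY_DBG_QUEUES  0x2

    #define my_dprintk(flg, fmt, ...)                               \
            do {                                                    \
                    if ((MY_DEBUG_MASK) & (flg))                    \
                            pr_debug(fmt, ##__VA_ARGS__);           \
            } while (0)

    /* my_dprintk(MY_DBG_IO, "plain message\n") compiles even with no
     * varargs, because ## pastes away the trailing comma; when the
     * mask bit is clear the whole statement is dead code the compiler
     * drops.
     */
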
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 14b5f8d0e7f4..cc9bd26f5d1a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
for (bit = 0; bit < 8; bit++) {
if ((pci_status[i] & (0x1 << bit)) != 0) {
- static const char *s;
+ const char *s;
s = pci_status_strings[bit];
if (i == 7/*TARG*/ && bit == 3)
@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
for (bit = 0; bit < 8; bit++) {
- if ((split_status[i] & (0x1 << bit)) != 0) {
- static const char *s;
-
- s = split_status_strings[bit];
- printk(s, ahd_name(ahd),
+ if ((split_status[i] & (0x1 << bit)) != 0)
+ printk(split_status_strings[bit], ahd_name(ahd),
split_status_source[i]);
- }
if (i > 1)
continue;
- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
- static const char *s;
-
- s = split_status_strings[bit];
- printk(s, ahd_name(ahd), "SG");
- }
+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
}
}
/*
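
In ahd_pci_intr()/ahd_pci_split_intr() the scratch pointer was declared static const char *s: block-scope static gives the pointer a single persistent instance, which buys nothing for a value reassigned on every loop pass and is one shared slot away from a race if two controllers ever interrupt concurrently. The first hunk drops the static; the second drops the temporary altogether. Minimal shape of the corrected loop, with hypothetical names:

    #include <linux/printk.h>

    static void print_status_bits(unsigned char status,
                                  const char *names[8])
    {
            int bit;

            for (bit = 0; bit < 8; bit++) {
                    /* automatic storage: fresh per iteration, no state
                     * shared between calls or CPUs
                     */
                    const char *s = names[bit];

                    if (status & (1 << bit))
                            printk("%s\n", s);
            }
    }
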
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 059ff477a398..2e797a367608 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -62,13 +62,6 @@
*/
#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
/*
- * SCSI-II Linked command support.
- *
- * The higher level code doesn't support linked commands yet, and so the option
- * is undef'd here.
- */
-#undef CONFIG_SCSI_ACORNSCSI_LINK
-/*
* SCSI-II Synchronous transfer support.
*
* Tried and tested...
@@ -160,10 +153,6 @@
#error "Yippee! ABORT TAG is now defined! Remove this error!"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-#error SCSI2 LINKed commands not supported (yet)!
-#endif
-
#ifdef USE_DMAC
/*
* DMAC setup parameters
@@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host)
}
break;
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
- case LINKED_CMD_COMPLETE:
- case LINKED_FLG_CMD_COMPLETE:
- /*
- * We don't support linked commands yet
- */
- if (0) {
-#if (DEBUG & DEBUG_LINK)
- printk("scsi%d.%c: lun %d tag %d linked command complete\n",
- host->host->host_no, acornscsi_target(host), host->SCpnt->tag);
-#endif
- /*
- * A linked command should only terminate with one of these messages
- * if there are more linked commands available.
- */
- if (!host->SCpnt->next_link) {
- printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n",
- instance->host_no, acornscsi_target(host), host->SCpnt->tag);
- acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
- msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
- } else {
- struct scsi_cmnd *SCpnt = host->SCpnt;
-
- acornscsi_dma_cleanup(host);
-
- host->SCpnt = host->SCpnt->next_link;
- host->SCpnt->tag = SCpnt->tag;
- SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status;
- SCpnt->done(SCpnt);
-
- /* initialise host->SCpnt->SCp */
- }
- break;
- }
-#endif
-
default: /* reject message */
printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n",
host->host->host_no, acornscsi_target(host),
@@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host)
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
" TAG"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
- " LINK"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
" TAG"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
- " LINK"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index f8e060900052..8ef810a4476e 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -36,9 +36,6 @@
void __iomem *base; \
void __iomem *dma
-#define BOARD_NORMAL 0
-#define BOARD_NCR53C400 1
-
#include "../NCR5380.h"
void cumanascsi_setup(char *str, int *ints)
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 4266eef8aca1..188e734c7ff0 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -37,9 +37,6 @@
#define NCR5380_implementation_fields \
void __iomem *base
-#define BOARD_NORMAL 0
-#define BOARD_NCR53C400 1
-
#include "../NCR5380.h"
#undef START_DMA_INITIATOR_RECEIVE_REG
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 0f3cdbc80ba6..1814aa20b724 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
return 0;
if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
- TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
return 1;
}
@@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
cmd->tag = TAG_NONE;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
- TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
"command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
} else {
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
@@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
set_bit(cmd->tag, ta->allocated);
ta->nr_allocated++;
- TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
+ dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
"(now %d tags in use)\n",
H_NO(cmd), cmd->tag, cmd->device->id,
cmd->device->lun, ta->nr_allocated);
@@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
if (cmd->tag == TAG_NONE) {
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
} else if (cmd->tag >= MAX_TAGS) {
printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
@@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
clear_bit(cmd->tag, ta->allocated);
ta->nr_allocated--;
- TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
+ dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
}
}
@@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
cmd->SCp.buffers_residual &&
virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
- MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
+ dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
#if (NDEBUG & NDEBUG_MERGING)
++cnt;
@@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
}
#if (NDEBUG & NDEBUG_MERGING)
if (oldlen != cmd->SCp.this_residual)
- MER_PRINTK("merged %d buffers from %p, new length %08x\n",
+ dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
}
@@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
}
-#else /* !NDEBUG */
-
-/* dummies... */
-static inline void NCR5380_print(struct Scsi_Host *instance)
-{
-};
-static inline void NCR5380_print_phase(struct Scsi_Host *instance)
-{
-};
-
#endif
/*
@@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void)
{
static int done = 0;
if (!done) {
- INI_PRINTK("scsi : NCR5380_all_init()\n");
+ dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
done = 1;
}
}
@@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
Scsi_Cmnd *ptr;
unsigned long flags;
- NCR_PRINT(NDEBUG_ANY);
- NCR_PRINT_PHASE(NDEBUG_ANY);
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
hostdata = (struct NCR5380_hostdata *)instance->hostdata;
@@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
}
local_irq_restore(flags);
- QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
+ dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
(cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
/* If queue_command() is called from an interrupt (real one or bottom
@@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work)
done = 1;
if (!hostdata->connected) {
- MAIN_PRINTK("scsi%d: not connected\n", HOSTNO);
+ dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
/*
* Search through the issue_queue for a command destined
* for a target that's not busy.
@@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work)
* On failure, we must add the command back to the
* issue queue so we can keep trying.
*/
- MAIN_PRINTK("scsi%d: main(): command for target %d "
+ dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
"lun %d removed from issue_queue\n",
HOSTNO, tmp->device->id, tmp->device->lun);
/*
@@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work)
#endif
falcon_dont_release--;
local_irq_restore(flags);
- MAIN_PRINTK("scsi%d: main(): select() failed, "
+ dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
"returned to issue_queue\n", HOSTNO);
if (hostdata->connected)
break;
@@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work)
#endif
) {
local_irq_restore(flags);
- MAIN_PRINTK("scsi%d: main: performing information transfer\n",
+ dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
HOSTNO);
NCR5380_information_transfer(instance);
- MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
+ dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
done = 0;
}
} while (!done);
@@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
(BASR_PHASE_MATCH|BASR_ACK)) {
saved_data = NCR5380_read(INPUT_DATA_REG);
overrun = 1;
- DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO);
+ dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
}
}
}
- DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
+ dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
NCR5380_read(STATUS_REG));
@@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
cnt = toPIO = atari_read_overruns;
if (overrun) {
- DMA_PRINTK("Got an input overrun, using saved byte\n");
+ dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
*(*data)++ = saved_data;
(*count)--;
cnt--;
toPIO--;
}
- DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
+ dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
NCR5380_transfer_pio(instance, &p, &cnt, data);
*count -= toPIO - cnt;
}
@@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
int done = 1, handled = 0;
unsigned char basr;
- INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
/* Look for pending interrupts */
basr = NCR5380_read(BUS_AND_STATUS_REG);
- INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
+ dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
/* dispatch to appropriate routine if found and done=0 */
if (basr & BASR_IRQ) {
- NCR_PRINT(NDEBUG_INTR);
+ NCR5380_dprint(NDEBUG_INTR, instance);
if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
done = 0;
ENABLE_IRQ();
- INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
NCR5380_reselect(instance);
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else if (basr & BASR_PARITY_ERROR) {
- INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
- INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else {
/*
@@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
((basr & BASR_END_DMA_TRANSFER) ||
!(basr & BASR_PHASE_MATCH))) {
- INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
NCR5380_dma_complete( instance );
done = 0;
ENABLE_IRQ();
@@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
}
if (!done) {
- INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
/* Put a call to NCR5380_main() on the queue... */
queue_main();
}
@@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
unsigned long flags;
hostdata->restart_select = 0;
- NCR_PRINT(NDEBUG_ARBITRATION);
- ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
+ NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
instance->this_id);
/*
@@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
;
#endif
- ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
if (hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
@@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
- ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
HOSTNO);
return -1;
}
@@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
HOSTNO);
return -1;
}
@@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
return -1;
}
- ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
/*
* Now that we have won arbitration, start Selection process, asserting
@@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
udelay(1);
- SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
+ dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
/*
* The SCSI specification calls for a 250 ms timeout for the actual
@@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
if (hostdata->restart_select)
printk(KERN_NOTICE "\trestart select\n");
- NCR_PRINT(NDEBUG_ANY);
+ NCR5380_dprint(NDEBUG_ANY, instance);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return -1;
}
@@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
#endif
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
+ dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return 0;
}
@@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
while (!(NCR5380_read(STATUS_REG) & SR_REQ))
;
- SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
+ dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
HOSTNO, cmd->device->id);
tmp[0] = IDENTIFY(1, cmd->device->lun);
@@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
+ dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
/* XXX need to handle errors here */
hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
@@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
;
- HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
/* Check for phase mismatch */
if ((tmp & PHASE_MASK) != p) {
- PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
- NCR_PRINT_PHASE(NDEBUG_PIO);
+ dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
+ NCR5380_dprint_phase(NDEBUG_PIO, instance);
break;
}
@@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
if (!(p & SR_IO)) {
if (!((p & SR_MSG) && c > 1)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
- NCR_PRINT(NDEBUG_PIO);
+ NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ACK);
} else {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ATN);
- NCR_PRINT(NDEBUG_PIO);
+ NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
}
} else {
- NCR_PRINT(NDEBUG_PIO);
+ NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
while (NCR5380_read(STATUS_REG) & SR_REQ)
;
- HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
/*
* We have several special cases to consider during REQ/ACK handshaking :
@@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
}
} while (--c);
- PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
+ dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
*count = c;
*data = d;
@@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
if (atari_read_overruns && (p & SR_IO))
c -= atari_read_overruns;
- DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+ dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
HOSTNO, (p & SR_IO) ? "reading" : "writing",
c, (p & SR_IO) ? "to" : "from", d);
@@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
phase = (tmp & PHASE_MASK);
if (phase != old_phase) {
old_phase = phase;
- NCR_PRINT_PHASE(NDEBUG_INFORMATION);
+ NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
if (sink && (phase != PHASE_MSGOUT)) {
@@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* they are at contiguous physical addresses.
*/
merge_contiguous_buffers(cmd);
- INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
+ dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
HOSTNO, cmd->SCp.this_residual,
cmd->SCp.buffers_residual);
}
@@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- LNK_PRINTK("scsi%d: target %d lun %d linked command "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
/* Enable reselect interrupts */
@@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* and don't free it! */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- LNK_PRINTK("scsi%d: target %d lun %d linked request "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
"done, calling scsi_done().\n",
HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
@@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* ++guenther: possible race with Falcon locking */
falcon_dont_release++;
hostdata->connected = NULL;
- QU_PRINTK("scsi%d: command for target %d, lun %d "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef SUPPORT_TAGS
cmd_free_tag(cmd);
@@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* ++Andreas: the mid level code knows about
QUEUE_FULL now. */
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
- TAG_PRINTK("scsi%d: target %d lun %d returned "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
"QUEUE_FULL after %d commands\n",
HOSTNO, cmd->device->id, cmd->device->lun,
ta->nr_allocated);
@@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
(status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
- ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO);
+ dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
local_irq_save(flags);
LIST(cmd,hostdata->issue_queue);
SET_NEXT(cmd, hostdata->issue_queue);
hostdata->issue_queue = (Scsi_Cmnd *) cmd;
local_irq_restore(flags);
- QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
+ dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
"issue queue\n", H_NO(cmd));
} else
#endif /* def AUTOSENSE */
@@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->device->tagged_supported = 0;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
cmd->tag = TAG_NONE;
- TAG_PRINTK("scsi%d: target %d lun %d rejected "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
"QUEUE_TAG message; tagged queuing "
"disabled\n",
HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
local_irq_restore(flags);
- QU_PRINTK("scsi%d: command for target %d lun %d was "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
"moved from connected to the "
"disconnected_queue\n", HOSTNO,
cmd->device->id, cmd->device->lun);
@@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Accept first byte by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
+ dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+ dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
(int)extended_msg[1], (int)extended_msg[2]);
if (!len && extended_msg[1] <=
@@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- EXT_PRINTK("scsi%d: message received, residual %d\n",
+ dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
HOSTNO, len);
switch (extended_msg[2]) {
@@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
break;
default:
printk("scsi%d: unknown phase\n", HOSTNO);
- NCR_PRINT(NDEBUG_ANY);
+ NCR5380_dprint(NDEBUG_ANY, instance);
} /* switch(phase) */
} /* if (tmp & SR_REQ) */
} /* while (1) */
@@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
- RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
+ dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
msg[1] == SIMPLE_QUEUE_TAG)
tag = msg[2];
- TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
+ dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
"reselection\n", HOSTNO, target_mask, lun, tag);
}
#endif
@@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = tmp;
- RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
falcon_dont_release--;
}
@@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
HOSTNO);
- ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+ dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
NCR5380_read(BUS_AND_STATUS_REG),
NCR5380_read(STATUS_REG));
@@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
if (hostdata->connected == cmd) {
- ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
+ dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
/*
* We should perform BSY checking, and make sure we haven't slipped
* into BUS FREE.
@@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
local_irq_restore(flags);
cmd->scsi_done(cmd);
falcon_release_lock_if_possible(hostdata);
- return SCSI_ABORT_SUCCESS;
+ return SUCCESS;
} else {
/* local_irq_restore(flags); */
printk("scsi%d: abort of connected command failed!\n", HOSTNO);
- return SCSI_ABORT_ERROR;
+ return FAILED;
}
}
#endif
@@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
SET_NEXT(tmp, NULL);
tmp->result = DID_ABORT << 16;
local_irq_restore(flags);
- ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
+ dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
HOSTNO);
/* Tagged queuing note: no tag to free here, hasn't been assigned
* yet... */
tmp->scsi_done(tmp);
falcon_release_lock_if_possible(hostdata);
- return SCSI_ABORT_SUCCESS;
+ return SUCCESS;
}
}
@@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
if (hostdata->connected) {
local_irq_restore(flags);
- ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
- return SCSI_ABORT_SNOOZE;
+ dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+ return FAILED;
}
/*
@@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
tmp = NEXT(tmp)) {
if (cmd == tmp) {
local_irq_restore(flags);
- ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
+ dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
if (NCR5380_select(instance, cmd, (int)cmd->tag))
- return SCSI_ABORT_BUSY;
+ return FAILED;
- ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
+ dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
do_abort(instance);
@@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
local_irq_restore(flags);
tmp->scsi_done(tmp);
falcon_release_lock_if_possible(hostdata);
- return SCSI_ABORT_SUCCESS;
+ return SUCCESS;
}
}
}
@@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
*/
falcon_release_lock_if_possible(hostdata);
- return SCSI_ABORT_NOT_RUNNING;
+ return FAILED;
}
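With the driver-private SCSI_ABORT_*/SCSI_RESET_* codes gone (their definitions are deleted from atari_scsi.h further down in this patch), the handlers report back in the only vocabulary the error-handling midlayer understands: SUCCESS or FAILED from <scsi/scsi.h>. A sketch of the resulting handler shape, with try_to_abort() standing in for the driver-specific work:

	/* Sketch only; try_to_abort() is a hypothetical driver helper. */
	static int example_eh_abort_handler(struct scsi_cmnd *cmd)
	{
		if (try_to_abort(cmd))
			return SUCCESS;
		return FAILED;
	}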
@@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
*
* Purpose : reset the SCSI bus.
*
- * Returns : SCSI_RESET_WAKEUP
+ * Returns : SUCCESS or FAILED
*
*/
@@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
SETUP_HOSTDATA(cmd->device->host);
int i;
unsigned long flags;
-#if 1
+#if defined(RESET_RUN_DONE)
Scsi_Cmnd *connected, *disconnected_queue;
#endif
@@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
* through anymore ... */
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
+ /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
+ * should go.
+ * Catch-22: if we don't clear all queues, the SCSI driver lock will
+ * not be reset by atari_scsi_reset()!
+ */
+
+#if defined(RESET_RUN_DONE)
+ /* XXX Should now be done by midlevel code, but it's broken XXX */
/* XXX see below XXX */
/* MSch: old-style reset: actually abort all command processing here */
@@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
*/
if ((cmd = connected)) {
- ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
cmd->scsi_done(cmd);
}
@@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
cmd->scsi_done(cmd);
}
if (i > 0)
- ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
+ dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
/* The Falcon lock should be released after a reset...
*/
@@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
* the midlevel code that the reset was SUCCESSFUL, and there is no
* need to 'wake up' the commands by a request_sense
*/
- return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+ return SUCCESS;
#else /* 1 */
/* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
*/
if (hostdata->issue_queue)
- ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
if (hostdata->connected)
- ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
if (hostdata->disconnected_queue)
- ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
local_irq_save(flags);
hostdata->issue_queue = NULL;
@@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
local_irq_restore(flags);
/* we did no complete reset of all commands, so a wakeup is required */
- return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
+ return SUCCESS;
#endif /* 1 */
}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a8d721ff19eb..b522134528d6 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -67,12 +67,6 @@
#include <linux/module.h>
-#define NDEBUG (0)
-
-#define NDEBUG_ABORT 0x00100000
-#define NDEBUG_TAGS 0x00200000
-#define NDEBUG_MERGING 0x00400000
-
#define AUTOSENSE
/* For the Atari version, use only polled IO or REAL_DMA */
#define REAL_DMA
@@ -314,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
dma_stat = tt_scsi_dma.dma_ctrl;
- INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n",
+ dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",
atari_scsi_host->host_no, dma_stat & 0xff);
/* Look if it was the DMA that has interrupted: First possibility
@@ -340,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
- DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
+ dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
atari_dma_residual);
if ((signed int)atari_dma_residual < 0)
@@ -371,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
* other command. These shouldn't disconnect anyway.
*/
if (atari_dma_residual & 0x1ff) {
- DMA_PRINTK("SCSI DMA: DMA bug corrected, "
+ dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "
"difference %ld bytes\n",
512 - (atari_dma_residual & 0x1ff));
atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff;
@@ -438,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
"ST-DMA fifo\n", transferred & 15);
atari_dma_residual = HOSTDATA_DMALEN - transferred;
- DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
+ dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
atari_dma_residual);
} else
atari_dma_residual = 0;
@@ -474,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void)
/* there are 'nr' bytes left for the last long address
before the DMA pointer */
phys_dst ^= nr;
- DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
+ dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
nr, phys_dst);
/* The content of the DMA pointer is a physical address! */
dst = phys_to_virt(phys_dst);
- DMA_PRINTK(" = virt addr %p\n", dst);
+ dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);
for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
*dst++ = *src++;
}
@@ -827,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
} else {
atari_turnon_irq(IRQ_MFP_FSCSI);
}
- if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
+ if (rv == SUCCESS)
falcon_release_lock_if_possible(hostdata);
return rv;
@@ -883,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
{
unsigned long addr = virt_to_phys(data);
- DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
+ dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
"dir = %d\n", instance->host_no, data, addr, count, dir);
if (!IS_A_TT() && !STRAM_ADDR(addr)) {
@@ -1063,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
possible_len = limit;
if (possible_len != wanted_len)
- DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes "
+ dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
"instead of %ld\n", possible_len, wanted_len);
return possible_len;
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index 11c624bb122d..3299d91d7336 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -54,125 +54,6 @@
#define NCR5380_dma_xfer_len(i,cmd,phase) \
atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
-/* former generic SCSI error handling stuff */
-
-#define SCSI_ABORT_SNOOZE 0
-#define SCSI_ABORT_SUCCESS 1
-#define SCSI_ABORT_PENDING 2
-#define SCSI_ABORT_BUSY 3
-#define SCSI_ABORT_NOT_RUNNING 4
-#define SCSI_ABORT_ERROR 5
-
-#define SCSI_RESET_SNOOZE 0
-#define SCSI_RESET_PUNT 1
-#define SCSI_RESET_SUCCESS 2
-#define SCSI_RESET_PENDING 3
-#define SCSI_RESET_WAKEUP 4
-#define SCSI_RESET_NOT_RUNNING 5
-#define SCSI_RESET_ERROR 6
-
-#define SCSI_RESET_SYNCHRONOUS 0x01
-#define SCSI_RESET_ASYNCHRONOUS 0x02
-#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
-#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
-
-#define SCSI_RESET_BUS_RESET 0x100
-#define SCSI_RESET_HOST_RESET 0x200
-#define SCSI_RESET_ACTION 0xff
-
-/* Debugging printk definitions:
- *
- * ARB -> arbitration
- * ASEN -> auto-sense
- * DMA -> DMA
- * HSH -> PIO handshake
- * INF -> information transfer
- * INI -> initialization
- * INT -> interrupt
- * LNK -> linked commands
- * MAIN -> NCR5380_main() control flow
- * NDAT -> no data-out phase
- * NWR -> no write commands
- * PIO -> PIO transfers
- * PDMA -> pseudo DMA (unused on Atari)
- * QU -> queues
- * RSL -> reselections
- * SEL -> selections
- * USL -> usleep cpde (unused on Atari)
- * LBS -> last byte sent (unused on Atari)
- * RSS -> restarting of selections
- * EXT -> extended messages
- * ABRT -> aborting and resetting
- * TAG -> queue tag handling
- * MER -> merging of consec. buffers
- *
- */
-
-#define dprint(flg, format...) \
-({ \
- if (NDEBUG & (flg)) \
- printk(KERN_DEBUG format); \
-})
-
-#define ARB_PRINTK(format, args...) \
- dprint(NDEBUG_ARBITRATION, format , ## args)
-#define ASEN_PRINTK(format, args...) \
- dprint(NDEBUG_AUTOSENSE, format , ## args)
-#define DMA_PRINTK(format, args...) \
- dprint(NDEBUG_DMA, format , ## args)
-#define HSH_PRINTK(format, args...) \
- dprint(NDEBUG_HANDSHAKE, format , ## args)
-#define INF_PRINTK(format, args...) \
- dprint(NDEBUG_INFORMATION, format , ## args)
-#define INI_PRINTK(format, args...) \
- dprint(NDEBUG_INIT, format , ## args)
-#define INT_PRINTK(format, args...) \
- dprint(NDEBUG_INTR, format , ## args)
-#define LNK_PRINTK(format, args...) \
- dprint(NDEBUG_LINKED, format , ## args)
-#define MAIN_PRINTK(format, args...) \
- dprint(NDEBUG_MAIN, format , ## args)
-#define NDAT_PRINTK(format, args...) \
- dprint(NDEBUG_NO_DATAOUT, format , ## args)
-#define NWR_PRINTK(format, args...) \
- dprint(NDEBUG_NO_WRITE, format , ## args)
-#define PIO_PRINTK(format, args...) \
- dprint(NDEBUG_PIO, format , ## args)
-#define PDMA_PRINTK(format, args...) \
- dprint(NDEBUG_PSEUDO_DMA, format , ## args)
-#define QU_PRINTK(format, args...) \
- dprint(NDEBUG_QUEUES, format , ## args)
-#define RSL_PRINTK(format, args...) \
- dprint(NDEBUG_RESELECTION, format , ## args)
-#define SEL_PRINTK(format, args...) \
- dprint(NDEBUG_SELECTION, format , ## args)
-#define USL_PRINTK(format, args...) \
- dprint(NDEBUG_USLEEP, format , ## args)
-#define LBS_PRINTK(format, args...) \
- dprint(NDEBUG_LAST_BYTE_SENT, format , ## args)
-#define RSS_PRINTK(format, args...) \
- dprint(NDEBUG_RESTART_SELECT, format , ## args)
-#define EXT_PRINTK(format, args...) \
- dprint(NDEBUG_EXTENDED, format , ## args)
-#define ABRT_PRINTK(format, args...) \
- dprint(NDEBUG_ABORT, format , ## args)
-#define TAG_PRINTK(format, args...) \
- dprint(NDEBUG_TAGS, format , ## args)
-#define MER_PRINTK(format, args...) \
- dprint(NDEBUG_MERGING, format , ## args)
-
-/* conditional macros for NCR5380_print_{,phase,status} */
-
-#define NCR_PRINT(mask) \
- ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
-
-#define NCR_PRINT_PHASE(mask) \
- ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
-
-#define NCR_PRINT_STATUS(mask) \
- ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
-
-
#endif /* ndef ASM */
#endif /* ATARI_SCSI_H */
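All of the *_PRINTK wrappers and NCR_PRINT* macros removed above funnel into two primitives that take the NDEBUG_* flag explicitly: dprintk() and NCR5380_dprint()/NCR5380_dprint_phase(). Their definitions are not part of this hunk (they live in the shared NCR5380 header); the dprintk() the converted call sites assume looks roughly like this sketch:

	#define dprintk(flg, fmt, ...)					\
		do {							\
			if ((NDEBUG) & (flg))				\
				printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
		} while (0)

Passing the flag as an argument keeps one macro for all two dozen debug categories instead of one wrapper per category.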
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1bfb0bd01198..860f527d8f26 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
+struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ bool enable;
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 prev_eqd; /* in usecs */
+ u32 et_eqd; /* configured val when aic is off */
+ ulong jiffs;
+ u64 eq_prev; /* Used to calculate eqe */
+};
+
struct be_eq_obj {
bool todo_mcc_cq;
bool todo_cq;
+ u32 cq_count;
struct be_queue_info q;
struct beiscsi_hba *phba;
struct be_queue_info *cq;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 7cf7f99ee442..cc7405c0eca0 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -71,6 +71,7 @@ struct be_mcc_wrb {
#define BEISCSI_FW_MBX_TIMEOUT 100
/* MBOX Command VER */
+#define MBX_CMD_VER1 0x01
#define MBX_CMD_VER2 0x02
struct be_mcc_compl {
@@ -271,6 +272,12 @@ struct be_cmd_resp_eq_create {
u16 rsvd0; /* sword */
} __packed;
+struct be_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+} __packed;
+
struct mgmt_chap_format {
u32 flags;
u8 intr_chap_name[256];
@@ -622,7 +629,7 @@ struct be_cmd_req_modify_eq_delay {
u32 eq_id;
u32 phase;
u32 delay_multiplier;
- } delay[8];
+ } delay[MAX_CPUS];
} __packed;
/******************** Get MAC ADDR *******************/
@@ -708,6 +715,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+ int num);
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
uint32_t tag, struct be_mcc_wrb **wrb,
struct be_dma_mem *mbx_cmd_mem);
@@ -1005,6 +1014,26 @@ struct tcp_connect_and_offload_in {
u8 rsvd0[3];
} __packed;
+struct tcp_connect_and_offload_in_v1 {
+ struct be_cmd_req_hdr hdr;
+ struct ip_addr_format ip_address;
+ u16 tcp_port;
+ u16 cid;
+ u16 cq_id;
+ u16 defq_id;
+ struct phys_addr dataout_template_pa;
+ u16 hdr_ring_id;
+ u16 data_ring_id;
+ u8 do_offload;
+ u8 ifd_state;
+ u8 rsvd0[2];
+ u16 tcp_window_size;
+ u8 tcp_window_scale_count;
+ u8 rsvd1;
+ u32 tcp_mss:24;
+ u8 rsvd2;
+} __packed;
+
struct tcp_connect_and_offload_out {
struct be_cmd_resp_hdr hdr;
u32 connection_handle;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a3df43324c98..fd284ff36ecf 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1106,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
struct beiscsi_hba *phba = beiscsi_ep->phba;
struct tcp_connect_and_offload_out *ptcpcnct_out;
struct be_dma_mem nonemb_cmd;
- unsigned int tag;
+ unsigned int tag, req_memsize;
int ret = -ENOMEM;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
@@ -1127,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
(beiscsi_ep->ep_cid)] = ep;
beiscsi_ep->cid_vld = 0;
+
+ if (is_chip_be2_be3r(phba))
+ req_memsize = sizeof(struct tcp_connect_and_offload_in);
+ else
+ req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
+
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
- sizeof(struct tcp_connect_and_offload_in),
+ req_memsize,
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
@@ -1139,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_free_ep(beiscsi_ep);
return -ENOMEM;
}
- nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
+ nonemb_cmd.size = req_memsize;
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
if (tag <= 0) {
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0d822297aa80..56467df3d6de 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -599,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
pci_set_drvdata(pcidev, phba);
phba->interface_handle = 0xFFFFFFFF;
- if (iscsi_host_add(shost, &phba->pcidev->dev))
- goto free_devices;
-
return phba;
-
-free_devices:
- pci_dev_put(phba->pcidev);
- iscsi_host_free(phba->shost);
- return NULL;
}
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
@@ -2279,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
ret = beiscsi_process_cq(pbe_eq);
+ pbe_eq->cq_count += ret;
if (ret < budget) {
phba = pbe_eq->phba;
blk_iopoll_complete(iop);
@@ -3692,7 +3685,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct hwi_async_pdu_context *pasync_ctx;
- int i, eq_num, ulp_num;
+ int i, eq_for_mcc, ulp_num;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3729,16 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
if (q->created)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
}
+
+ be_mcc_queues_destroy(phba);
if (phba->msix_enabled)
- eq_num = 1;
+ eq_for_mcc = 1;
else
- eq_num = 0;
- for (i = 0; i < (phba->num_cpus + eq_num); i++) {
+ eq_for_mcc = 0;
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
q = &phwi_context->be_eq[i].q;
if (q->created)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
}
- be_mcc_queues_destroy(phba);
be_cmd_fw_uninit(ctrl);
}
@@ -3833,9 +3827,9 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- phwi_context->max_eqd = 0;
+ phwi_context->max_eqd = 128;
phwi_context->min_eqd = 0;
- phwi_context->cur_eqd = 64;
+ phwi_context->cur_eqd = 0;
be_cmd_fw_initialize(&phba->ctrl);
status = beiscsi_create_eqs(phba, phwi_context);
@@ -4204,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
kfree(phba->ep_array);
phba->ep_array = NULL;
ret = -ENOMEM;
+
+ goto free_memory;
}
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
@@ -5290,6 +5286,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
return;
}
+static void be_eqd_update(struct beiscsi_hba *phba)
+{
+ struct be_set_eqd set_eqd[MAX_CPUS];
+ struct be_aic_obj *aic;
+ struct be_eq_obj *pbe_eq;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ int eqd, i, num = 0;
+ ulong now;
+ u32 pps, delta;
+ unsigned int tag;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ for (i = 0; i <= phba->num_cpus; i++) {
+ aic = &phba->aic_obj[i];
+ pbe_eq = &phwi_context->be_eq[i];
+ now = jiffies;
+ if (!aic->jiffs || time_before(now, aic->jiffs) ||
+ pbe_eq->cq_count < aic->eq_prev) {
+ aic->jiffs = now;
+ aic->eq_prev = pbe_eq->cq_count;
+ continue;
+ }
+ delta = jiffies_to_msecs(now - aic->jiffs);
+ pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
+ eqd = (pps / 1500) << 2;
+
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, phwi_context->max_eqd);
+ eqd = max_t(u32, eqd, phwi_context->min_eqd);
+
+ aic->jiffs = now;
+ aic->eq_prev = pbe_eq->cq_count;
+
+ if (eqd != aic->prev_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = pbe_eq->q.id;
+ aic->prev_eqd = eqd;
+ num++;
+ }
+ }
+ if (num) {
+ tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
+ if (tag)
+ beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ }
+}
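The arithmetic in be_eqd_update() turns a completion rate into an EQ delay: pps = completions * 1000 / delta_ms, then eqd = (pps / 1500) << 2 microseconds, zeroed below 8 and clamped to [min_eqd, max_eqd]; the value handed to firmware is eqd * 65 / 100. A standalone check of those steps:

	#include <stdio.h>

	static unsigned int calc_eqd(unsigned int cq_delta, unsigned int delta_ms,
				     unsigned int min_eqd, unsigned int max_eqd)
	{
		unsigned int pps = cq_delta * 1000 / delta_ms;	/* completions/sec */
		unsigned int eqd = (pps / 1500) << 2;		/* delay in usecs */

		if (eqd < 8)
			eqd = 0;		/* low rate: no coalescing */
		if (eqd > max_eqd)
			eqd = max_eqd;
		if (eqd < min_eqd)
			eqd = min_eqd;
		return eqd;
	}

	int main(void)
	{
		/* 30000 completions in 1000 ms: eqd = 80, multiplier = 52 */
		unsigned int eqd = calc_eqd(30000, 1000, 0, 128);

		printf("eqd=%u multiplier=%u\n", eqd, eqd * 65 / 100);
		return 0;
	}

With the hwi_init_port() change above (max_eqd 128, cur_eqd 0), coalescing now starts disabled and ramps up only under load.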
+
/*
* beiscsi_hw_health_check()- Check adapter health
* @work: work item to check HW health
@@ -5303,6 +5350,8 @@ beiscsi_hw_health_check(struct work_struct *work)
container_of(work, struct beiscsi_hba,
beiscsi_hw_check_task.work);
+ be_eqd_update(phba);
+
beiscsi_ue_detect(phba);
schedule_delayed_work(&phba->beiscsi_hw_check_task,
@@ -5579,7 +5628,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_numtag[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
- sizeof(struct beiscsi_mcc_tag_state));
+ sizeof(struct be_dma_mem));
}
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
@@ -5621,6 +5670,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
}
hwi_enable_intr(phba);
+ if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+ goto free_blkenbld;
+
if (beiscsi_setup_boot_info(phba))
/*
* log error but continue, because we may not be using
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 9380b55bdeaf..9ceab426eec9 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.2.125.0"
+#define BUILD_STR "10.2.273.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -71,8 +71,8 @@
#define BEISCSI_SGLIST_ELEMENTS 30
-#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
+#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
+#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
@@ -427,6 +427,7 @@ struct beiscsi_hba {
struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
+ struct be_aic_obj aic_obj[MAX_CPUS];
unsigned int attr_log_enable;
int (*iotask_fn)(struct iscsi_task *,
struct scatterlist *sg,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 088bdf752cfa..07934b0b9ee1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba)
}
}
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *set_eqd, int num)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_modify_eq_delay *req;
+ unsigned int tag = 0;
+ int i;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->delay[i].phase = 0;
+ req->delay[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
/**
* mgmt_reopen_session()- Reopen a session based on reopen_type
* @phba: Device priv structure instance
@@ -447,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct be_dma_mem *nonemb_cmd)
{
struct be_cmd_resp_hdr *resp;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct be_sge *mcc_sge = nonembedded_sgl(wrb);
+ struct be_mcc_wrb *wrb;
+ struct be_sge *mcc_sge;
unsigned int tag = 0;
struct iscsi_bsg_request *bsg_req = job->request;
struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
@@ -465,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
req->sector = sector;
req->offset = offset;
spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
case BEISCSI_WRITE_FLASH:
@@ -495,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag;
}
+ wrb = wrb_from_mccq(phba);
+ mcc_sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
job->request_payload.sg_cnt);
mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
@@ -525,7 +563,6 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
int status = 0;
spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -675,7 +712,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
- struct tcp_connect_and_offload_in *req;
+ struct tcp_connect_and_offload_in_v1 *req;
unsigned short def_hdr_id;
unsigned short def_data_id;
struct phys_addr template_address = { 0, 0 };
@@ -702,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
return tag;
}
wrb = wrb_from_mccq(phba);
- memset(wrb, 0, sizeof(*wrb));
sge = nonembedded_sgl(wrb);
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
wrb->tag0 |= tag;
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
- sizeof(*req));
+ nonemb_cmd->size);
if (dst_addr->sa_family == PF_INET) {
__be32 s_addr = daddr_in->sin_addr.s_addr;
req->ip_address.ip_type = BE2_IPV4;
@@ -758,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ if (!is_chip_be2_be3r(phba)) {
+ req->hdr.version = MBX_CMD_VER1;
+ req->tcp_window_size = 0;
+ req->tcp_window_scale_count = 2;
+ }
+
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
@@ -804,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
int resp_buf_len)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+ struct be_mcc_wrb *wrb;
struct be_sge *sge;
unsigned int tag;
int rc = 0;
@@ -816,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
rc = -ENOMEM;
goto free_cmd;
}
- memset(wrb, 0, sizeof(*wrb));
+
+ wrb = wrb_from_mccq(phba);
wrb->tag0 |= tag;
sge = nonembedded_sgl(wrb);
@@ -964,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
BE2_IPV6 : BE2_IPV4 ;
rc = mgmt_get_if_info(phba, ip_type, &if_info);
- if (rc) {
- kfree(if_info);
+ if (rc)
return rc;
- }
if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
if (if_info->dhcp_state) {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 01b8c97284c0..24a8fc577477 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle);
void beiscsi_ue_detect(struct beiscsi_hba *phba);
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *, int num);
#endif
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cc0fbcdc5192..7593b7c1d336 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -507,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
struct bfad_vport_s *vport;
int rc;
- vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
+ vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
if (!vport) {
bfa_trc(bfad, 0);
return;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f54843023466..785d0d71781e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
fr_sof(fp) = hp->fcoe_sof;
if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
- put_cpu();
kfree_skb(skb);
return;
}
fr_eof(fp) = crc_eof.fcoe_eof;
fr_crc(fp) = crc_eof.fcoe_crc32;
if (pskb_trim(skb, fr_len)) {
- put_cpu();
kfree_skb(skb);
return;
}
@@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
port = lport_priv(vn_port);
if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
- put_cpu();
kfree_skb(skb);
return;
}
@@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
fh->fh_type == FC_TYPE_FCP) {
/* Drop FCP data. We don't do this in the L2 path */
- put_cpu();
kfree_skb(skb);
return;
}
@@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
case ELS_LOGO:
if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
/* drop non-FIP LOGO */
- put_cpu();
kfree_skb(skb);
return;
}
@@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
/* Drop incoming ABTS */
- put_cpu();
kfree_skb(skb);
return;
}
+ stats = per_cpu_ptr(lport->stats, smp_processor_id());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
stats->InvalidCRCCount++;
- put_cpu();
kfree_skb(skb);
return;
}
- put_cpu();
fc_exch_recv(lport, fp);
}
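Besides moving the counters behind the early-drop checks, so dropped frames no longer bump RxFrames, the change swaps the get_cpu()/put_cpu() pair for a bare smp_processor_id(). That is only safe on the assumption that bnx2fc_recv_frame() already runs in a context that cannot migrate between CPUs; the two idioms side by side:

	/* Before: disable preemption for the duration of the update. */
	stats = per_cpu_ptr(lport->stats, get_cpu());
	stats->RxFrames++;
	put_cpu();

	/* After: assumes the caller is already pinned to one CPU. */
	stats = per_cpu_ptr(lport->stats, smp_processor_id());
	stats->RxFrames++;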
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 46a37657307f..512aed3ae4f1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
int i;
int segment_count;
- int hash_table_size;
u32 *pbl;
- segment_count = hba->hash_tbl_segment_count;
- hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
- sizeof(struct fcoe_hash_table_entry);
+ if (hba->hash_tbl_segments) {
- pbl = hba->hash_tbl_pbl;
- for (i = 0; i < segment_count; ++i) {
- dma_addr_t dma_address;
+ pbl = hba->hash_tbl_pbl;
+ if (pbl) {
+ segment_count = hba->hash_tbl_segment_count;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
- dma_address = le32_to_cpu(*pbl);
- ++pbl;
- dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
- ++pbl;
- dma_free_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- hba->hash_tbl_segments[i],
- dma_address);
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+ }
+ }
+ kfree(hba->hash_tbl_segments);
+ hba->hash_tbl_segments = NULL;
}
if (hba->hash_tbl_pbl) {
@@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
if (!dma_segment_array) {
printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
- return -ENOMEM;
+ goto cleanup_ht;
}
for (i = 0; i < segment_count; ++i) {
@@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
- while (--i >= 0) {
- dma_free_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- hba->hash_tbl_segments[i],
- dma_segment_array[i]);
- hba->hash_tbl_segments[i] = NULL;
- }
- kfree(dma_segment_array);
- return -ENOMEM;
+ goto cleanup_dma;
}
memset(hba->hash_tbl_segments[i], 0,
BNX2FC_HASH_TBL_CHUNK_SIZE);
@@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
- kfree(dma_segment_array);
- return -ENOMEM;
+ goto cleanup_dma;
}
memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
@@ -2080,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
kfree(dma_segment_array);
return 0;
+
+cleanup_dma:
+ for (i = 0; i < segment_count; ++i) {
+ if (hba->hash_tbl_segments[i])
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ }
+
+ kfree(dma_segment_array);
+
+cleanup_ht:
+ kfree(hba->hash_tbl_segments);
+ hba->hash_tbl_segments = NULL;
+ return -ENOMEM;
}
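The rewritten error path uses the kernel's usual goto-unwind pattern: each allocation failure jumps to a label that releases everything acquired so far, in reverse order, so the cleanup code exists exactly once. The pattern in isolation (generic userspace C, not the driver's helpers):

	#include <stdio.h>
	#include <stdlib.h>

	static int alloc_two(void **a, void **b)
	{
		*a = malloc(64);
		if (!*a)
			goto err;
		*b = malloc(64);
		if (!*b)
			goto err_free_a;
		return 0;

	err_free_a:			/* unwind in reverse order */
		free(*a);
		*a = NULL;
	err:
		return -1;
	}

	int main(void)
	{
		void *a, *b;

		if (!alloc_two(&a, &b)) {
			puts("ok");
			free(b);
			free(a);
		}
		return 0;
	}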
/**
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 32a5e0a2a669..7bc47fc7c686 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
arr_sz, GFP_KERNEL);
if (!cmgr->free_list_lock) {
printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ kfree(cmgr->free_list);
+ cmgr->free_list = NULL;
goto mem_err;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5248c888552b..7bcf67eec921 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
"%s: blk_get_request failed\n", __func__);
return NULL;
}
+ blk_rq_set_block_pc(rq);
if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
blk_put_request(rq);
@@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,
return NULL;
}
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->retries = ALUA_FAILOVER_RETRIES;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e1c8be06de9d..6f07f7fe3aa1 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
return NULL;
}
+ blk_rq_set_block_pc(rq);
rq->cmd_len = COMMAND_SIZE(cmd);
rq->cmd[0] = cmd;
@@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
break;
}
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->timeout = CLARIION_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 084062bb8ee9..e9d9fea9e272 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -120,7 +120,7 @@ retry:
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
- req->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(req);
req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
@@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
- req->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(req);
req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(START_STOP);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 4b9cf93f3fb6..826069db9848 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -279,6 +279,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
"get_rdac_req: blk_get_request failed.\n");
return NULL;
}
+ blk_rq_set_block_pc(rq);
if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
blk_put_request(rq);
@@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->retries = RDAC_RETRIES;
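All four device handlers get the same mechanical conversion: blk_rq_set_block_pc(), introduced in the 3.16 block layer, initializes a BLOCK_PC request (including rq->cmd_type) in one place, and the hunks call it immediately after blk_get_request(), before the buffer is mapped. The resulting allocation pattern, sketched:

	rq = blk_get_request(q, rw, GFP_NOIO);
	if (!rq)
		return NULL;
	blk_rq_set_block_pc(rq);	/* was: rq->cmd_type = REQ_TYPE_BLOCK_PC */
	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		return NULL;
	}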
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index eb29fe7eaf49..0a667fe05006 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -3,8 +3,6 @@
#define PSEUDO_DMA
#define DONT_USE_INTR
#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
-#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\
- NDEBUG_SELECTION+NDEBUG_ARBITRATION)
#define DMA_WORKS_RIGHT
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f37f3e3dd5d5..6504a195c874 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid,
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
"pci_enable_device() OK");
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
- "after pci_device_enable() enable_cnt: %d",
+ "after pci_enable_device() enable_cnt: %d",
pcid->enable_cnt.counter);
host = scsi_host_alloc(&driver_template, host_alloc_size);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 528d43b7b569..1d3521e13d77 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,14 +39,15 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.45"
+#define DRV_VERSION "1.6.0.10"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
-#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
+#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
+#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index b6073f875761..2c613bdea78f 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -25,6 +25,21 @@ static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
static struct dentry *fnic_stats_debugfs_root;
+static struct dentry *fnic_fc_trace_debugfs_file;
+static struct dentry *fnic_fc_rdata_trace_debugfs_file;
+static struct dentry *fnic_fc_trace_enable;
+static struct dentry *fnic_fc_trace_clear;
+
+struct fc_trace_flag_type {
+ u8 fc_row_file;
+ u8 fc_normal_file;
+ u8 fnic_trace;
+ u8 fc_trace;
+ u8 fc_clear;
+};
+
+static struct fc_trace_flag_type *fc_trc_flag;
+
/*
* fnic_debugfs_init - Initialize debugfs for fnic debug logging
*
@@ -56,6 +71,18 @@ int fnic_debugfs_init(void)
return rc;
}
+ /* Allocate memory for the trace flag structure */
+ fc_trc_flag = (struct fc_trace_flag_type *)
+ vmalloc(sizeof(struct fc_trace_flag_type));
+
+ if (fc_trc_flag) {
+ fc_trc_flag->fc_row_file = 0;
+ fc_trc_flag->fc_normal_file = 1;
+ fc_trc_flag->fnic_trace = 2;
+ fc_trc_flag->fc_trace = 3;
+ fc_trc_flag->fc_clear = 4;
+ }
+
rc = 0;
return rc;
}
@@ -74,15 +101,19 @@ void fnic_debugfs_terminate(void)
debugfs_remove(fnic_trace_debugfs_root);
fnic_trace_debugfs_root = NULL;
+
+ if (fc_trc_flag)
+ vfree(fc_trc_flag);
}
/*
- * fnic_trace_ctrl_open - Open the trace_enable file
+ * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
+ * or open the fc_trace_enable file for fc_trace
* @inode: The inode pointer.
* @file: The file pointer to attach the trace enable/disable flag.
*
* Description:
- * This routine opens a debugsfs file trace_enable.
+ * This routine opens a debugfs file trace_enable or fc_trace_enable.
*
* Returns:
* This function returns zero if successful.
@@ -94,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
}
/*
- * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * fnic_trace_ctrl_read -
+ * Read trace_enable, fc_trace_enable
+ * or fc_trace_clear debugfs file
* @filp: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @cnt: The number of bytes to read.
* @ppos: The position in the file to start reading from.
*
* Description:
- * This routine reads value of variable fnic_tracing_enabled
- * and stores into local @buf. It will start reading file at @ppos and
+ * This routine reads value of variable fnic_tracing_enabled or
+ * fnic_fc_tracing_enabled or fnic_fc_trace_cleared
+ * and stores it into local @buf.
+ * It will start reading file at @ppos and
* copy up to @cnt of data to @ubuf from @buf.
*
* Returns:
@@ -114,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
{
char buf[64];
int len;
- len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+ u8 *trace_type;
+ len = 0;
+ trace_type = (u8 *)filp->private_data;
+ if (*trace_type == fc_trc_flag->fnic_trace)
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+ else if (*trace_type == fc_trc_flag->fc_trace)
+ len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+ else if (*trace_type == fc_trc_flag->fc_clear)
+ len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+ else
+ pr_err("fnic: Cannot read to any debugfs file\n");
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
/*
- * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * fnic_trace_ctrl_write -
+ * Write to trace_enable, fc_trace_enable or
+ * fc_trace_clear debugfs file
* @filp: The file pointer to write from.
* @ubuf: The buffer to copy the data from.
* @cnt: The number of bytes to write.
@@ -128,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
*
* Description:
* This routine writes data from user buffer @ubuf to buffer @buf and
- * sets fnic_tracing_enabled value as per user input.
+ * sets fnic_tracing_enabled, fnic_fc_tracing_enabled or fnic_fc_trace_cleared
+ * value as per user input.
*
* Returns:
* This function returns the amount of data that was written.
@@ -140,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
char buf[64];
unsigned long val;
int ret;
+ u8 *trace_type;
+ trace_type = (u8 *)filp->private_data;
if (cnt >= sizeof(buf))
return -EINVAL;
@@ -153,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
if (ret < 0)
return ret;
- fnic_tracing_enabled = val;
+ if (*trace_type == fc_trc_flag->fnic_trace)
+ fnic_tracing_enabled = val;
+ else if (*trace_type == fc_trc_flag->fc_trace)
+ fnic_fc_tracing_enabled = val;
+ else if (*trace_type == fc_trc_flag->fc_clear)
+ fnic_fc_trace_cleared = val;
+ else
+ pr_err("fnic: cannot write to any debufs file\n");
+
(*ppos)++;
return cnt;
}
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
/*
* fnic_trace_debugfs_open - Open the fnic trace log
* @inode: The inode pointer
@@ -178,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode,
struct file *file)
{
fnic_dbgfs_t *fnic_dbg_prt;
+ u8 *rdata_ptr;
+ rdata_ptr = (u8 *)inode->i_private;
fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
if (!fnic_dbg_prt)
return -ENOMEM;
- fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
- if (!fnic_dbg_prt->buffer) {
- kfree(fnic_dbg_prt);
- return -ENOMEM;
+ if (*rdata_ptr == fc_trc_flag->fnic_trace) {
+ fnic_dbg_prt->buffer = vmalloc(3 *
+ (trace_max_pages * PAGE_SIZE));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ 3 * (trace_max_pages * PAGE_SIZE));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ } else {
+ fnic_dbg_prt->buffer =
+ vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ 3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+ fnic_dbg_prt->buffer_len =
+ fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
}
- memset((void *)fnic_dbg_prt->buffer, 0,
- (3*(trace_max_pages * PAGE_SIZE)));
- fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
file->private_data = fnic_dbg_prt;
+
return 0;
}
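
The 3x multiplier on the vmalloc size in both branches appears to budget for formatting expansion when the raw entries are rendered as text (for example, "%02x" plus a separator turns one raw byte into roughly three output characters); the same factor reappears later as the max_size bound handed to snprintf(). A back-of-the-envelope check, assuming 4 KiB pages and the default of 64 fc trace pages:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;    /* assuming 4 KiB PAGE_SIZE */
        unsigned long pages = 64;          /* fnic_fc_trace_max_pages default */
        unsigned long raw = pages * page_size;

        /* one raw byte -> "%02x" plus a separator, i.e. ~3 chars of text */
        printf("raw buffer: %lu bytes, formatted budget: %lu bytes\n",
               raw, 3 * raw);
        return 0;
}
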
@@ -272,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode,
return 0;
}
-static const struct file_operations fnic_trace_ctrl_fops = {
- .owner = THIS_MODULE,
- .open = fnic_trace_ctrl_open,
- .read = fnic_trace_ctrl_read,
- .write = fnic_trace_ctrl_write,
-};
-
static const struct file_operations fnic_trace_debugfs_fops = {
.owner = THIS_MODULE,
.open = fnic_trace_debugfs_open,
@@ -306,9 +381,10 @@ int fnic_trace_debugfs_init(void)
return rc;
}
fnic_trace_enable = debugfs_create_file("tracing_enable",
- S_IFREG|S_IRUGO|S_IWUSR,
- fnic_trace_debugfs_root,
- NULL, &fnic_trace_ctrl_fops);
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fnic_trace),
+ &fnic_trace_ctrl_fops);
if (!fnic_trace_enable) {
printk(KERN_DEBUG
@@ -317,10 +393,10 @@ int fnic_trace_debugfs_init(void)
}
fnic_trace_debugfs_file = debugfs_create_file("trace",
- S_IFREG|S_IRUGO|S_IWUSR,
- fnic_trace_debugfs_root,
- NULL,
- &fnic_trace_debugfs_fops);
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fnic_trace),
+ &fnic_trace_debugfs_fops);
if (!fnic_trace_debugfs_file) {
printk(KERN_DEBUG
@@ -340,14 +416,104 @@ int fnic_trace_debugfs_init(void)
*/
void fnic_trace_debugfs_terminate(void)
{
- if (fnic_trace_debugfs_file) {
- debugfs_remove(fnic_trace_debugfs_file);
- fnic_trace_debugfs_file = NULL;
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+}
+
+/*
+ * fnic_fc_trace_debugfs_init -
+ * Initialize debugfs for fnic control frame trace logging
+ *
+ * Description:
+ * When debugfs is configured, this routine sets up the fnic_fc debugfs
+ * files. If not already created, it will create the file fc_trace to log
+ * the fnic fc trace buffer output into debugfs, and it will also create
+ * the file fc_trace_enable to control enabling/disabling of trace
+ * logging into the trace buffer.
+ */
+
+int fnic_fc_trace_debugfs_init(void)
+{
+ int rc = -1;
+
+ if (!fnic_trace_debugfs_root) {
+ pr_err("fnic:Debugfs root directory doesn't exist\n");
+ return rc;
+ }
+
+ fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_trace),
+ &fnic_trace_ctrl_fops);
+
+ if (!fnic_fc_trace_enable) {
+ pr_err("fnic: Failed create fc_trace_enable file\n");
+ return rc;
+ }
+
+ fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_clear),
+ &fnic_trace_ctrl_fops);
+
+ if (!fnic_fc_trace_clear) {
+ pr_err("fnic: Failed to create fc_trace_enable file\n");
+ return rc;
+ }
+
+ fnic_fc_rdata_trace_debugfs_file =
+ debugfs_create_file("fc_trace_rdata",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_normal_file),
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_fc_rdata_trace_debugfs_file) {
+ pr_err("fnic: Failed create fc_rdata_trace file\n");
+ return rc;
}
- if (fnic_trace_enable) {
- debugfs_remove(fnic_trace_enable);
- fnic_trace_enable = NULL;
+
+ fnic_fc_trace_debugfs_file =
+ debugfs_create_file("fc_trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_row_file),
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_fc_trace_debugfs_file) {
+ pr_err("fnic: Failed to create fc_trace file\n");
+ return rc;
}
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic_fc trace logging.
+ */
+
+void fnic_fc_trace_debugfs_terminate(void)
+{
+ debugfs_remove(fnic_fc_trace_debugfs_file);
+ fnic_fc_trace_debugfs_file = NULL;
+
+ debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
+ fnic_fc_rdata_trace_debugfs_file = NULL;
+
+ debugfs_remove(fnic_fc_trace_enable);
+ fnic_fc_trace_enable = NULL;
+
+ debugfs_remove(fnic_fc_trace_clear);
+ fnic_fc_trace_clear = NULL;
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1671325aec7f..1b948f633fc5 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work)
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
if (old_link_status == fnic->link_status) {
- if (!fnic->link_status)
+ if (!fnic->link_status) {
/* DOWN -> DOWN */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- else {
+ fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_LE, "Link Status: DOWN->DOWN",
+ strlen("Link Status: DOWN->DOWN"));
+ } else {
if (old_link_down_cnt != fnic->link_down_cnt) {
/* UP -> DOWN -> UP */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no,
+ FNIC_FC_LE,
+ "Link Status:UP_DOWN_UP",
+ strlen("Link_Status:UP_DOWN_UP")
+ );
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no,
+ FNIC_FC_LE,
+ "Link Status: UP_DOWN_UP_VLAN",
+ strlen(
+ "Link Status: UP_DOWN_UP_VLAN")
+ );
fnic_fcoe_send_vlan_req(fnic);
return;
}
@@ -88,22 +104,36 @@ void fnic_handle_link(struct work_struct *work)
} else
/* UP -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no, FNIC_FC_LE,
+ "Link Status: UP_UP",
+ strlen("Link Status: UP_UP"));
}
} else if (fnic->link_status) {
/* DOWN -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no,
+ FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
+ strlen("Link Status: DOWN_UP_VLAN"));
fnic_fcoe_send_vlan_req(fnic);
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
+ fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
+ "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> DOWN */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no, FNIC_FC_LE,
+ "Link Status: UP_DOWN",
+ strlen("Link Status: UP_DOWN"));
fcoe_ctlr_link_down(&fnic->ctlr);
}
@@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
if (desc->fip_dtype == FIP_DT_FLOGI) {
- shost_printk(KERN_DEBUG, lport->host,
- " FIP TYPE FLOGI: fab name:%llx "
- "vfid:%d map:%x\n",
- fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
- fip->sel_fcf->fc_map);
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
return 0;
@@ -616,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
"using UCSM\n");
goto drop;
}
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
skb_queue_tail(&fnic->fip_frame_queue, skb);
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
return 1; /* let caller know packet was used */
@@ -844,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
}
fr_dev(fp) = fnic->lport;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
+ (char *)skb->data, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
skb_queue_tail(&fnic->frame_queue, skb);
queue_work(fnic_event_queue, &fnic->frame_work);
@@ -951,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+ } else {
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
}
pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
@@ -1023,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
+ (char *)eth_hdr, tot_len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
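
All five hook sites in this file follow the same best-effort shape: the return value of fnic_fc_trace_set_data() is checked only to log an error, and the frame continues down the data path regardless. If the pattern grew further it could be factored into a small wrapper; a hypothetical sketch (fnic_fc_trace_frame is not a function in the driver):

static inline void fnic_fc_trace_frame(u32 host_no, u8 frame_type,
                                       char *frame, u32 len)
{
        /* tracing is best effort: log the failure, never fail the I/O */
        if (fnic_fc_trace_set_data(host_no, frame_type, frame, len) != 0)
                printk(KERN_ERR "fnic ctlr frame trace error!!!\n");
}
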
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 33e4ec2bfe73..8c56fdc3a456 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
"for fnic trace buffer");
+unsigned int fnic_fc_trace_max_pages = 64;
+module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_fc_trace_max_pages,
+ "Total allocated memory pages for fc trace buffer");
+
static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
@@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = {
.change_queue_type = fc_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
- .can_queue = FNIC_MAX_IO_REQ,
+ .can_queue = FNIC_DFLT_IO_REQ,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
@@ -773,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
shost_printk(KERN_INFO, fnic->lport->host,
"firmware uses non-FIP mode\n");
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
+ fnic->ctlr.state = FIP_ST_NON_FIP;
}
fnic->state = FNIC_IN_FC_MODE;
@@ -1033,11 +1039,20 @@ static int __init fnic_init_module(void)
/* Allocate memory for trace buffer */
err = fnic_trace_buf_init();
if (err < 0) {
- printk(KERN_ERR PFX "Trace buffer initialization Failed "
- "Fnic Tracing utility is disabled\n");
+ printk(KERN_ERR PFX
+ "Trace buffer initialization Failed. "
+ "Fnic Tracing utility is disabled\n");
fnic_trace_free();
}
+ /* Allocate memory for fc trace buffer */
+ err = fnic_fc_trace_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "FC trace buffer initialization Failed "
+ "FC frame tracing utility is disabled\n");
+ fnic_fc_trace_free();
+ }
+
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -1118,6 +1133,7 @@ err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
fnic_trace_free();
+ fnic_fc_trace_free();
fnic_debugfs_terminate();
return err;
}
@@ -1135,6 +1151,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
+ fnic_fc_trace_free();
fnic_debugfs_terminate();
}
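
Since fnic_fc_trace_max_pages is a module parameter, the fc trace buffer size can be chosen at load time, and the control files created above then live under the driver's debugfs root. Assuming the usual debugfs mount point and a root directory named fnic (the exact paths depend on where fnic_debugfs_init() creates its root, so treat them as an assumption), usage looks roughly like:

	modprobe fnic fnic_fc_trace_max_pages=128
	echo 1 > /sys/kernel/debug/fnic/fc_trace_enable
	cat /sys/kernel/debug/fnic/fc_trace
	echo 1 > /sys/kernel/debug/fnic/fc_trace_clear
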
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 0521436d05d6..ea28b5ca4c73 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1312,8 +1312,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
- " DID_TRANSPORT_DISRUPTED\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ __func__, (jiffies - start_time));
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1733,6 +1734,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct fnic_stats *fnic_stats;
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
+ enum fnic_ioreq_state old_ioreq_state;
int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1793,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* the completion wont be done till mid-layer, since abort
* has already started.
*/
+ old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
@@ -1816,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
@@ -1859,12 +1864,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
- FNIC_SCSI_DBG(KERN_INFO,
- fnic->lport->host, "Abort Driver Timeout\n");
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "Terminate Driver Timeout\n");
atomic64_inc(&term_stats->terminate_drv_timeouts);
}
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index e002e7187dc0..c77285926827 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
+#include <linux/time.h>
#include "fnic_io.h"
#include "fnic.h"
@@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock);
static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;
+
+static int fc_trace_max_entries;
+static unsigned long fnic_fc_ctlr_trace_buf_p;
+static fnic_trace_dbg_t fc_trace_entries;
+int fnic_fc_tracing_enabled = 1;
+int fnic_fc_trace_cleared = 1;
+static DEFINE_SPINLOCK(fnic_fc_trace_lock);
+
+
/*
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
*
@@ -428,10 +439,10 @@ int fnic_trace_buf_init(void)
}
err = fnic_trace_debugfs_init();
if (err < 0) {
- printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+ pr_err("fnic: Failed to initialize debugfs for tracing\n");
goto err_fnic_trace_debugfs_init;
}
- printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+ pr_info("fnic: Successfully Initialized Trace Buffer\n");
return err;
err_fnic_trace_debugfs_init:
fnic_trace_free();
@@ -456,3 +467,314 @@ void fnic_trace_free(void)
}
printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}
+
+/*
+ * fnic_fc_trace_init -
+ * Initialize trace buffer to log fnic control frames
+ * Description:
+ * Initialize trace buffer data structure by allocating
+ * required memory for trace data as well as for Indexes.
+ * Frame size is 256 bytes and
+ * memory is allocated for 1024 entries of 256 bytes.
+ * Page_offset(Index) is set to the address of trace entry
+ * and page_offset is initialized by adding frame size
+ * to the previous page_offset entry.
+ */
+
+int fnic_fc_trace_init(void)
+{
+ unsigned long fc_trace_buf_head;
+ int err = 0;
+ int i;
+
+ fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
+ FC_TRC_SIZE_BYTES;
+ fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
+ fnic_fc_trace_max_pages * PAGE_SIZE);
+ if (!fnic_fc_ctlr_trace_buf_p) {
+ pr_err("fnic: Failed to allocate memory for "
+ "FC Control Trace Buf\n");
+ err = -ENOMEM;
+ goto err_fnic_fc_ctlr_trace_buf_init;
+ }
+
+ memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
+ fnic_fc_trace_max_pages * PAGE_SIZE);
+
+ /* Allocate memory for page offset */
+ fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
+ sizeof(unsigned long));
+ if (!fc_trace_entries.page_offset) {
+ pr_err("fnic:Failed to allocate memory for page_offset\n");
+ if (fnic_fc_ctlr_trace_buf_p) {
+ pr_err("fnic: Freeing FC Control Trace Buf\n");
+ vfree((void *)fnic_fc_ctlr_trace_buf_p);
+ fnic_fc_ctlr_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_fc_ctlr_trace_buf_init;
+ }
+ memset((void *)fc_trace_entries.page_offset, 0,
+ (fc_trace_max_entries * sizeof(unsigned long)));
+
+ fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
+ fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
+
+ /*
+ * Set up fc_trace_entries.page_offset field with memory location
+ * for every trace entry
+ */
+ for (i = 0; i < fc_trace_max_entries; i++) {
+ fc_trace_entries.page_offset[i] = fc_trace_buf_head;
+ fc_trace_buf_head += FC_TRC_SIZE_BYTES;
+ }
+ err = fnic_fc_trace_debugfs_init();
+ if (err < 0) {
+ pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
+ goto err_fnic_fc_ctlr_trace_debugfs_init;
+ }
+ pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
+ return err;
+
+err_fnic_fc_ctlr_trace_debugfs_init:
+ fnic_fc_trace_free();
+err_fnic_fc_ctlr_trace_buf_init:
+ return err;
+}
+
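
With the defaults above, the arithmetic works out to exactly the 1024 entries the comment promises: 64 pages x 4096 bytes / 256 bytes per entry. The same index setup in a self-contained userspace form, assuming 4 KiB pages:

#include <stdio.h>
#include <stdlib.h>

#define FC_TRC_SIZE_BYTES 256

int main(void)
{
        unsigned long page_size = 4096;    /* assuming 4 KiB pages */
        unsigned long pages = 64;          /* default fnic_fc_trace_max_pages */
        unsigned long max_entries = pages * page_size / FC_TRC_SIZE_BYTES;
        char *buf = calloc(pages, page_size);
        unsigned long *offset = calloc(max_entries, sizeof(*offset));
        unsigned long i;

        /* mirror the init loop: entry i starts i * 256 bytes into the buffer */
        for (i = 0; i < max_entries; i++)
                offset[i] = (unsigned long)buf + i * FC_TRC_SIZE_BYTES;

        printf("%lu entries of %d bytes\n", max_entries, FC_TRC_SIZE_BYTES);
        free(offset);
        free(buf);
        return 0;
}
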
+/*
+ * fnic_fc_trace_free - Free memory of the fnic_fc_ctlr trace data structures.
+ */
+void fnic_fc_trace_free(void)
+{
+ fnic_fc_tracing_enabled = 0;
+ fnic_fc_trace_debugfs_terminate();
+ if (fc_trace_entries.page_offset) {
+ vfree((void *)fc_trace_entries.page_offset);
+ fc_trace_entries.page_offset = NULL;
+ }
+ if (fnic_fc_ctlr_trace_buf_p) {
+ vfree((void *)fnic_fc_ctlr_trace_buf_p);
+ fnic_fc_ctlr_trace_buf_p = 0;
+ }
+ pr_info("fnic:Successfully FC_CTLR Freed Trace Buffer\n");
+}
+
+/*
+ * fnic_fc_trace_set_data:
+ * Maintain rd & wr idx accordingly and set data
+ * Passed parameters:
+ * host_no: host number associated with fnic
+ * frame_type: send frame, receive frame or link event
+ * frame: pointer to the frame data
+ * fc_trc_frame_len: length of the frame
+ * Description:
+ * This routine will get the next available wr_idx and
+ * copy all passed trace data to the buffer pointed to by wr_idx,
+ * then increment wr_idx. It also makes sure not to
+ * overwrite the entry currently being read, and it wraps
+ * around on reaching the maximum number of entries.
+ * Returned Value:
+ * It will return 0 for success or -1 for failure
+ */
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+ char *frame, u32 fc_trc_frame_len)
+{
+ unsigned long flags;
+ struct fc_trace_hdr *fc_buf;
+ unsigned long eth_fcoe_hdr_len;
+ char *fc_trace;
+
+ if (fnic_fc_tracing_enabled == 0)
+ return 0;
+
+ spin_lock_irqsave(&fnic_fc_trace_lock, flags);
+
+ if (fnic_fc_trace_cleared == 1) {
+ fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
+ pr_info("fnic: Reseting the read idx\n");
+ memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
+ fnic_fc_trace_max_pages * PAGE_SIZE);
+ fnic_fc_trace_cleared = 0;
+ }
+
+ fc_buf = (struct fc_trace_hdr *)
+ fc_trace_entries.page_offset[fc_trace_entries.wr_idx];
+
+ fc_trace_entries.wr_idx++;
+
+ if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
+ fc_trace_entries.wr_idx = 0;
+
+ if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
+ fc_trace_entries.rd_idx++;
+ if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
+ fc_trace_entries.rd_idx = 0;
+ }
+
+ fc_buf->time_stamp = CURRENT_TIME;
+ fc_buf->host_no = host_no;
+ fc_buf->frame_type = frame_type;
+
+ fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);
+
+ /* During the receive path, we do not have the eth hdr or the fcoe hdr
+ * at the trace entry point, so we stuff 0xff to keep the layout generic.
+ */
+ if (frame_type == FNIC_FC_RECV) {
+ eth_fcoe_hdr_len = sizeof(struct ethhdr) +
+ sizeof(struct fcoe_hdr);
+ fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len;
+ memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
+ /* Copy the rest of data frame */
+ memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
+ min_t(u8, fc_trc_frame_len,
+ (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
+ } else {
+ memcpy((char *)fc_trace, (void *)frame,
+ min_t(u8, fc_trc_frame_len,
+ (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
+ }
+
+ /* Store the actual received length */
+ fc_buf->frame_len = fc_trc_frame_len;
+
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ return 0;
+}
+
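
The index handling above is a classic overwrite-oldest ring: the writer claims a slot, wraps at the end, and if it lands on the reader's position it pushes rd_idx forward so the oldest record is the one sacrificed. The same bookkeeping in a self-contained userspace form (MAX_ENTRIES shrunk for illustration):

#include <stdio.h>

#define MAX_ENTRIES 8	/* tiny ring, for illustration only */

static int wr_idx, rd_idx;

static int ring_claim_slot(void)
{
        int slot = wr_idx;

        wr_idx++;
        if (wr_idx >= MAX_ENTRIES)
                wr_idx = 0;
        /* writer caught up with reader: drop the oldest entry */
        if (wr_idx == rd_idx) {
                rd_idx++;
                if (rd_idx >= MAX_ENTRIES)
                        rd_idx = 0;
        }
        return slot;
}

int main(void)
{
        int i;

        for (i = 0; i < 12; i++)
                printf("wrote slot %d (rd=%d wr=%d)\n",
                       ring_claim_slot(), rd_idx, wr_idx);
        return 0;
}
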
+/*
+ * fnic_fc_trace_get_data: Copy trace buffer to a memory file
+ * Passed parameter:
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ * rdata_flag: 1 => Unformatted file
+ *             0 => Formatted file
+ * Description:
+ * This routine will copy the trace data to a memory file with
+ * proper formatting, and also copy it to another memory
+ * file without formatting for further processing.
+ * Return Value:
+ * Number of bytes that were dumped into fnic_dbgfs_t
+ */
+
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
+{
+ int rd_idx, wr_idx;
+ unsigned long flags;
+ int len = 0, j;
+ struct fc_trace_hdr *tdata;
+ char *fc_trace;
+
+ spin_lock_irqsave(&fnic_fc_trace_lock, flags);
+ if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ pr_info("fnic: Buffer is empty\n");
+ return 0;
+ }
+ rd_idx = fc_trace_entries.rd_idx;
+ wr_idx = fc_trace_entries.wr_idx;
+ if (rdata_flag == 0) {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+ "Time Stamp (UTC)\t\t"
+ "Host No: F Type: len: FCoE_FRAME:\n");
+ }
+
+ while (rd_idx != wr_idx) {
+ tdata = (struct fc_trace_hdr *)
+ fc_trace_entries.page_offset[rd_idx];
+ if (!tdata) {
+ pr_info("fnic: Rd data is NULL\n");
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ return 0;
+ }
+ if (rdata_flag == 0) {
+ copy_and_format_trace_data(tdata,
+ fnic_dbgfs_prt, &len, rdata_flag);
+ } else {
+ fc_trace = (char *)tdata;
+ for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3)
+ - len, "%02x", fc_trace[j] & 0xff);
+ } /* for loop */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+ "\n");
+ }
+ rd_idx++;
+ if (rd_idx > (fc_trace_max_entries - 1))
+ rd_idx = 0;
+ }
+
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ return len;
+}
+
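
The dump loop above leans on the usual accumulate-with-snprintf idiom: every call writes at buffer+len and is offered only the space that remains, so len can only grow toward the bound. One caveat worth noting: userspace snprintf() returns the untruncated length, so the idiom assumes the 3x-sized buffer is genuinely large enough (the kernel's scnprintf() would clamp instead). A minimal standalone version:

#include <stdio.h>

int main(void)
{
        char out[64];
        unsigned char frame[4] = { 0x0e, 0xfc, 0x00, 0x10 };
        int len = 0, max = sizeof(out), j;

        /* append at out+len, offering only the space that remains */
        for (j = 0; j < 4; j++)
                len += snprintf(out + len, max - len, "%02x",
                                frame[j] & 0xff);
        len += snprintf(out + len, max - len, "\n");
        printf("%s", out);	/* prints "0efc0010" */
        return 0;
}
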
+/*
+ * copy_and_format_trace_data: Copy formatted data to char * buffer
+ * Passed Parameter:
+ * @fc_trace_hdr_t: pointer to trace data
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ * @orig_len: pointer to len
+ * rdata_flag: 0 => Formated file, 1 => Unformated file
+ * Description:
+ * This routine will format and copy the passed trace data
+ * for formated file or unformated file accordingly.
+ */
+
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+ fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
+ u8 rdata_flag)
+{
+ struct tm tm;
+ int j, i = 1, len;
+ char *fc_trace, *fmt;
+ int ethhdr_len = sizeof(struct ethhdr) - 1;
+ int fcoehdr_len = sizeof(struct fcoe_hdr);
+ int fchdr_len = sizeof(struct fc_frame_header);
+ int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
+
+ tdata->frame_type = tdata->frame_type & 0x7F;
+
+ len = *orig_len;
+
+ time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+
+ fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+ fmt,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ tdata->time_stamp.tv_nsec, tdata->host_no,
+ tdata->frame_type, tdata->frame_len);
+
+ fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
+
+ for (j = 0; j < min_t(u8, tdata->frame_len,
+ (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
+ if (tdata->frame_type == FNIC_FC_LE) {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, "%c", fc_trace[j]);
+ } else {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, "%02x", fc_trace[j] & 0xff);
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, " ");
+ if (j == ethhdr_len ||
+ j == ethhdr_len + fcoehdr_len ||
+ j == ethhdr_len + fcoehdr_len + fchdr_len ||
+ (i > 3 && j%fchdr_len == 0)) {
+ len += snprintf(fnic_dbgfs_prt->buffer
+ + len, (fnic_fc_trace_max_pages
+ * PAGE_SIZE * 3) - len,
+ "\n\t\t\t\t\t\t\t\t");
+ i++;
+ }
+ } /* end of else*/
+ } /* End of for loop*/
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, "\n");
+ *orig_len = len;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index d412f2ee3c4f..a8aa0578fcb0 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -19,6 +19,17 @@
#define __FNIC_TRACE_H__
#define FNIC_ENTRY_SIZE_BYTES 64
+#define FC_TRC_SIZE_BYTES 256
+#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr)
+
+/*
+ * The top bit (0x80) of FNIC_FC_RECV and FNIC_FC_SEND is used to represent
+ * the type of frame: 1 => Eth frame, 0 => FC frame
+ */
+
+#define FNIC_FC_RECV 0x52 /* Character R */
+#define FNIC_FC_SEND 0x54 /* Character T */
+#define FNIC_FC_LE 0x4C /* Character L */
extern ssize_t simple_read_from_buffer(void __user *to,
size_t count,
@@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages;
extern int fnic_tracing_enabled;
extern unsigned int trace_max_pages;
+extern unsigned int fnic_fc_trace_max_pages;
+extern int fnic_fc_tracing_enabled;
+extern int fnic_fc_trace_cleared;
+
typedef struct fnic_trace_dbg {
int wr_idx;
int rd_idx;
@@ -56,6 +71,16 @@ struct fnic_trace_data {
typedef struct fnic_trace_data fnic_trace_data_t;
+struct fc_trace_hdr {
+ struct timespec time_stamp;
+ u32 host_no;
+ u8 frame_type;
+ u8 frame_len;
+} __attribute__((__packed__));
+
+#define FC_TRACE_ADDRESS(a) \
+ ((unsigned long)(a) + sizeof(struct fc_trace_hdr))
+
#define FNIC_TRACE_ENTRY_SIZE \
(FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
@@ -88,4 +113,17 @@ int fnic_debugfs_init(void);
void fnic_debugfs_terminate(void);
int fnic_trace_debugfs_init(void);
void fnic_trace_debugfs_terminate(void);
+
+/* Fnic FC CTLR Trace related functions */
+int fnic_fc_trace_init(void);
+void fnic_fc_trace_free(void);
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+ char *frame, u32 fc_frame_len);
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+ fnic_dbgfs_t *fnic_dbgfs_prt,
+ int *len, u8 rdata_flag);
+int fnic_fc_trace_debugfs_init(void);
+void fnic_fc_trace_debugfs_terminate(void);
+
#endif
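
The 0x80 flag explains the FNIC_FC_SEND|0x80 and FNIC_FC_RECV|0x80 callers in fnic_fcs.c and the frame_type & 0x7F mask in copy_and_format_trace_data(): the low seven bits carry the printable type character, and the top bit says whether the payload starts with an ethernet header. A short demonstration:

#include <stdio.h>

#define FNIC_FC_RECV 0x52	/* Character R */
#define FNIC_FC_SEND 0x54	/* Character T */

int main(void)
{
        unsigned char stored = FNIC_FC_SEND | 0x80;	/* eth-encapsulated send */

        printf("eth=%d type='%c'\n", !!(stored & 0x80), stored & 0x7f);
        return 0;
}
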
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 7176365e916b..a1bc8ca958e1 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -78,10 +78,6 @@
*
*/
-/*
- * $Log: generic_NCR5380.c,v $
- */
-
/* settings for DTC3181E card with only Mustek scanner attached */
#define USLEEP
#define USLEEP_POLL 1
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 1bcdb7beb77b..703adf78e0b2 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -25,10 +25,6 @@
* 1+ (800) 334-5454
*/
-/*
- * $Log: generic_NCR5380.h,v $
- */
-
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
@@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
#define CAN_QUEUE 16
#endif
-#ifndef HOSTS_C
-
#define __STRVAL(x) #x
#define STRVAL(x) __STRVAL(x)
@@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
#define BOARD_NCR53C400A 2
#define BOARD_DTC3181E 3
-#endif /* else def HOSTS_C */
#endif /* ndef ASM */
#endif /* GENERIC_NCR5380_H */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9a6e4a2cd072..31184b35370f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
+#include <linux/percpu.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
@@ -115,9 +116,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -165,9 +172,15 @@ static struct board_type products[] = {
{0x21C3103C, "Smart Array", &SA5_access},
{0x21C4103C, "Smart Array", &SA5_access},
{0x21C5103C, "Smart Array", &SA5_access},
+ {0x21C6103C, "Smart Array", &SA5_access},
{0x21C7103C, "Smart Array", &SA5_access},
{0x21C8103C, "Smart Array", &SA5_access},
{0x21C9103C, "Smart Array", &SA5_access},
+ {0x21CA103C, "Smart Array", &SA5_access},
+ {0x21CB103C, "Smart Array", &SA5_access},
+ {0x21CC103C, "Smart Array", &SA5_access},
+ {0x21CD103C, "Smart Array", &SA5_access},
+ {0x21CE103C, "Smart Array", &SA5_access},
{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -181,7 +194,8 @@ static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
-static void start_io(struct ctlr_info *h);
+static void lock_and_start_io(struct ctlr_info *h);
+static void start_io(struct ctlr_info *h, unsigned long *flags);
#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -683,7 +697,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
u32 a;
- struct reply_pool *rq = &h->reply_queue[q];
+ struct reply_queue_buffer *rq = &h->reply_queue[q];
unsigned long flags;
if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -832,8 +846,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
+ start_io(h, &flags);
spin_unlock_irqrestore(&h->lock, flags);
- start_io(h);
}
static inline void removeQ(struct CommandList *c)
@@ -1542,9 +1556,13 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
dev_warn(&h->pdev->dev,
"%s: task complete with check condition.\n",
"HP SSD Smart Path");
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
if (c2->error_data.data_present !=
- IOACCEL2_SENSE_DATA_PRESENT)
+ IOACCEL2_SENSE_DATA_PRESENT) {
+ memset(cmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
break;
+ }
/* copy the sense data */
data_len = c2->error_data.sense_data_len;
if (data_len > SCSI_SENSE_BUFFERSIZE)
@@ -1554,7 +1572,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
sizeof(c2->error_data.sense_data_buff);
memcpy(cmd->sense_buffer,
c2->error_data.sense_data_buff, data_len);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
retry = 1;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
@@ -1639,16 +1656,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
if (is_logical_dev_addr_mode(dev->scsi3addr) &&
c2->error_data.serv_response ==
IOACCEL2_SERV_RESPONSE_FAILURE) {
- if (c2->error_data.status ==
- IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
- dev_warn(&h->pdev->dev,
- "%s: Path is unavailable, retrying on standard path.\n",
- "HP SSD Smart Path");
- else
- dev_warn(&h->pdev->dev,
- "%s: Error 0x%02x, retrying on standard path.\n",
- "HP SSD Smart Path", c2->error_data.status);
-
dev->offload_enabled = 0;
h->drv_req_rescan = 1; /* schedule controller for a rescan */
cmd->result = DID_SOFT_ERROR << 16;
@@ -1979,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
wait_for_completion(&wait);
}
+static u32 lockup_detected(struct ctlr_info *h)
+{
+ int cpu;
+ u32 rc, *lockup_detected;
+
+ cpu = get_cpu();
+ lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+ rc = *lockup_detected;
+ put_cpu();
+ return rc;
+}
+
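
The point of moving lockup_detected from a single spinlock-protected field to a per-cpu variable is that the submit path can now poll the flag without bouncing h->lock between CPUs: each reader consults its own CPU's copy, and the rare writer (the lockup detector) updates every copy. The shape of the pattern, reduced to a hedged kernel-style sketch with hypothetical names (flag_read, flag_set_all):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

static u32 __percpu *flag;	/* allocated once with alloc_percpu(u32) */

/* hot path: lockless read of this CPU's copy */
static u32 flag_read(void)
{
        u32 v;
        int cpu = get_cpu();	/* pin to a CPU while dereferencing */

        v = *per_cpu_ptr(flag, cpu);
        put_cpu();
        return v;
}

/* cold path: broadcast a new value to every online CPU's copy */
static void flag_set_all(u32 value)
{
        int cpu;

        for_each_online_cpu(cpu)
                *per_cpu_ptr(flag, cpu) = value;
        wmb();	/* be sure the per-cpu stores are out to memory */
}
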
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
struct CommandList *c)
{
- unsigned long flags;
-
/* If controller lockup detected, fake a hardware error. */
- spin_lock_irqsave(&h->lock, flags);
- if (unlikely(h->lockup_detected)) {
- spin_unlock_irqrestore(&h->lock, flags);
+ if (unlikely(lockup_detected(h)))
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- } else {
- spin_unlock_irqrestore(&h->lock, flags);
+ else
hpsa_scsi_do_simple_cmd_core(h, c);
- }
}
#define MAX_DRIVER_CMD_RETRIES 25
@@ -2417,7 +2430,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
buflen = 16;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
- return -1;
+ return -ENOMEM;
rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
@@ -2503,27 +2516,21 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
return HPSA_VPD_LV_STATUS_UNSUPPORTED;
/* Does controller have VPD for logical volume status? */
- if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
- dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
goto exit_failed;
- }
/* Get the size of the VPD return buffer */
rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
buf, HPSA_VPD_HEADER_SZ);
- if (rc != 0) {
- dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ if (rc != 0)
goto exit_failed;
- }
size = buf[3];
/* Now get the whole VPD buffer */
rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
buf, size + HPSA_VPD_HEADER_SZ);
- if (rc != 0) {
- dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ if (rc != 0)
goto exit_failed;
- }
status = buf[4]; /* status byte */
kfree(buf);
@@ -2536,11 +2543,11 @@ exit_failed:
/* Determine offline status of a volume.
* Return either:
* 0 (not offline)
- * -1 (offline for unknown reasons)
+ * 0xff (offline for unknown reasons)
* # (integer code indicating one of several NOT READY states
* describing why a volume is to be kept offline)
*/
-static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+static int hpsa_volume_offline(struct ctlr_info *h,
unsigned char scsi3addr[])
{
struct CommandList *c;
@@ -2639,11 +2646,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
if (this_device->devtype == TYPE_DISK &&
is_logical_dev_addr_mode(scsi3addr)) {
+ int volume_offline;
+
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
hpsa_get_ioaccel_status(h, scsi3addr, this_device);
- this_device->volume_offline =
- hpsa_volume_offline(h, scsi3addr);
+ volume_offline = hpsa_volume_offline(h, scsi3addr);
+ if (volume_offline < 0 || volume_offline > 0xff)
+ volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
+ this_device->volume_offline = volume_offline & 0xff;
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
@@ -2836,6 +2847,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
/* Get the list of physical devices */
physicals = kzalloc(reportsize, GFP_KERNEL);
+ if (physicals == NULL)
+ return 0;
if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
reportsize, extended)) {
dev_err(&h->pdev->dev,
@@ -2847,26 +2860,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
responsesize;
-
/* find ioaccel2 handle in list of physicals: */
for (i = 0; i < nphysicals; i++) {
+ struct ext_report_lun_entry *entry = &physicals->LUN[i];
+
/* handle is in bytes 28-31 of each lun */
- if (memcmp(&((struct ReportExtendedLUNdata *)
- physicals)->LUN[i][20], &find, 4) != 0) {
+ if (entry->ioaccel_handle != find)
continue; /* didn't match */
- }
found = 1;
- memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
- physicals)->LUN[i][0], 8);
+ memcpy(scsi3addr, entry->lunid, 8);
if (h->raid_offload_debug > 0)
dev_info(&h->pdev->dev,
- "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
__func__, find,
- ((struct ReportExtendedLUNdata *)
- physicals)->LUN[i][20],
- scsi3addr[0], scsi3addr[1], scsi3addr[2],
- scsi3addr[3], scsi3addr[4], scsi3addr[5],
- scsi3addr[6], scsi3addr[7]);
+ entry->ioaccel_handle, scsi3addr);
break; /* found it */
}
@@ -2951,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
return RAID_CTLR_LUNID;
if (i < logicals_start)
- return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
+ return &physdev_list->LUN[i -
+ (raid_ctlr_position == 0)].lunid[0];
if (i < last_device)
return &logdev_list->LUN[i - nphysicals -
@@ -2963,19 +2971,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
int rc;
+ int hba_mode_enabled;
struct bmic_controller_parameters *ctlr_params;
ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
GFP_KERNEL);
if (!ctlr_params)
- return 0;
+ return -ENOMEM;
rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
sizeof(struct bmic_controller_parameters));
- if (rc != 0) {
+ if (rc) {
kfree(ctlr_params);
- return 0;
+ return rc;
}
- return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+
+ hba_mode_enabled =
+ ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
+ kfree(ctlr_params);
+ return hba_mode_enabled;
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
@@ -3001,7 +3014,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
- u8 rescan_hba_mode;
+ int rescan_hba_mode;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3016,6 +3029,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
memset(lunzerobits, 0, sizeof(lunzerobits));
rescan_hba_mode = hpsa_hba_mode_enabled(h);
+ if (rescan_hba_mode < 0)
+ goto out;
if (!h->hba_mode_enabled && rescan_hba_mode)
dev_warn(&h->pdev->dev, "HBA mode enabled\n");
@@ -3053,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ndev_allocated++;
}
- if (unlikely(is_scsi_rev_5(h)))
+ if (is_scsi_rev_5(h))
raid_ctlr_position = 0;
else
raid_ctlr_position = nphysicals + nlogicals;
@@ -3950,7 +3965,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
struct hpsa_scsi_dev_t *dev;
unsigned char scsi3addr[8];
struct CommandList *c;
- unsigned long flags;
int rc = 0;
/* Get the ptr to our adapter structure out of cmd->host. */
@@ -3963,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
}
memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
- spin_lock_irqsave(&h->lock, flags);
- if (unlikely(h->lockup_detected)) {
- spin_unlock_irqrestore(&h->lock, flags);
+ if (unlikely(lockup_detected(h))) {
cmd->result = DID_ERROR << 16;
done(cmd);
return 0;
}
- spin_unlock_irqrestore(&h->lock, flags);
c = cmd_alloc(h);
if (c == NULL) { /* trouble... */
dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
@@ -4082,16 +4093,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
* we can prevent new rescan threads from piling up on a
* locked up controller.
*/
- spin_lock_irqsave(&h->lock, flags);
- if (unlikely(h->lockup_detected)) {
- spin_unlock_irqrestore(&h->lock, flags);
+ if (unlikely(lockup_detected(h))) {
spin_lock_irqsave(&h->scan_lock, flags);
h->scan_finished = 1;
wake_up_all(&h->scan_wait_queue);
spin_unlock_irqrestore(&h->scan_lock, flags);
return 1;
}
- spin_unlock_irqrestore(&h->lock, flags);
return 0;
}
@@ -4942,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -EFAULT;
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ if (iocommand.Request.Type.Direction & XFER_WRITE) {
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
@@ -5005,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
rc = -EFAULT;
goto out;
}
- if (iocommand.Request.Type.Direction == XFER_READ &&
+ if ((iocommand.Request.Type.Direction & XFER_READ) &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
@@ -5082,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -ENOMEM;
goto cleanup1;
}
- if (ioc->Request.Type.Direction == XFER_WRITE) {
+ if (ioc->Request.Type.Direction & XFER_WRITE) {
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
status = -ENOMEM;
goto cleanup1;
@@ -5134,7 +5142,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EFAULT;
goto cleanup0;
}
- if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
+ if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
@@ -5438,13 +5446,12 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
/* Takes cmds off the submission queue and sends them to the hardware,
* then puts them on the queue of cmds waiting for completion.
+ * Assumes h->lock is held
*/
-static void start_io(struct ctlr_info *h)
+static void start_io(struct ctlr_info *h, unsigned long *flags)
{
struct CommandList *c;
- unsigned long flags;
- spin_lock_irqsave(&h->lock, flags);
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
@@ -5467,14 +5474,20 @@ static void start_io(struct ctlr_info *h)
* condition.
*/
h->commands_outstanding++;
- if (h->commands_outstanding > h->max_outstanding)
- h->max_outstanding = h->commands_outstanding;
/* Tell the controller execute command */
- spin_unlock_irqrestore(&h->lock, flags);
+ spin_unlock_irqrestore(&h->lock, *flags);
h->access.submit_command(h, c);
- spin_lock_irqsave(&h->lock, flags);
+ spin_lock_irqsave(&h->lock, *flags);
}
+}
+
+static void lock_and_start_io(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ start_io(h, &flags);
spin_unlock_irqrestore(&h->lock, flags);
}
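
The refactoring above inverts the locking contract: start_io() used to take h->lock itself, but enqueue_cmd_and_start_io() already holds it, so the caller's saved flags are now handed in and start_io() drops and retakes the lock only around the actual doorbell write. Reduced to its skeleton, with hypothetical stand-ins for the work test and the hardware submit:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static int have_work(void) { return 0; }	/* hypothetical predicate */
static void hw_submit(void) { }			/* hypothetical hardware write */

/* callee: entered with demo_lock held; drops it across the slow write */
static void submit_all(unsigned long *flags)
{
        while (have_work()) {
                spin_unlock_irqrestore(&demo_lock, *flags);
                hw_submit();
                spin_lock_irqsave(&demo_lock, *flags);
        }
}

/* wrapper for callers that do not already hold the lock */
static void lock_and_submit_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        submit_all(&flags);
        spin_unlock_irqrestore(&demo_lock, flags);
}
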
@@ -5542,7 +5555,7 @@ static inline void finish_cmd(struct CommandList *c)
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
if (unlikely(io_may_be_stalled))
- start_io(h);
+ lock_and_start_io(h);
}
static inline u32 hpsa_tag_contains_index(u32 tag)
@@ -5819,12 +5832,12 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
dev_info(&pdev->dev, "using doorbell to reset controller\n");
writel(use_doorbell, vaddr + SA5_DOORBELL);
- /* PMC hardware guys tell us we need a 5 second delay after
+ /* PMC hardware guys tell us we need a 10 second delay after
* doorbell reset and before any attempt to talk to the board
* at all to ensure that this actually works and doesn't fall
* over in some weird corner cases.
*/
- msleep(5000);
+ msleep(10000);
} else { /* Try to do it the PCI power state way */
/* Quoting from the Open CISS Specification: "The Power
@@ -6145,6 +6158,8 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
dev_info(&h->pdev->dev, "MSIX\n");
h->msix_vector = MAX_REPLY_QUEUES;
+ if (h->msix_vector > num_online_cpus())
+ h->msix_vector = num_online_cpus();
err = pci_enable_msix(h->pdev, hpsa_msix_entries,
h->msix_vector);
if (err > 0) {
@@ -6594,6 +6609,17 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}
+static void hpsa_irq_affinity_hints(struct ctlr_info *h)
+{
+ int i, cpu, rc;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < h->msix_vector; i++) {
+ rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+}
+
static int hpsa_request_irq(struct ctlr_info *h,
irqreturn_t (*msixhandler)(int, void *),
irqreturn_t (*intxhandler)(int, void *))
@@ -6613,6 +6639,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
rc = request_irq(h->intr[i], msixhandler,
0, h->devname,
&h->q[i]);
+ hpsa_irq_affinity_hints(h);
} else {
/* Use single reply pool */
if (h->msix_vector > 0 || h->msi_vector) {
@@ -6664,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)
if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
i = h->intr_mode;
+ irq_set_affinity_hint(h->intr[i], NULL);
free_irq(h->intr[i], &h->q[i]);
return;
}
- for (i = 0; i < h->msix_vector; i++)
+ for (i = 0; i < h->msix_vector; i++) {
+ irq_set_affinity_hint(h->intr[i], NULL);
free_irq(h->intr[i], &h->q[i]);
+ }
}
static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
@@ -6686,6 +6716,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
#endif /* CONFIG_PCI_MSI */
}
+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+ int i;
+
+ for (i = 0; i < h->nreply_queues; i++) {
+ if (!h->reply_queue[i].head)
+ continue;
+ pci_free_consistent(h->pdev, h->reply_queue_size,
+ h->reply_queue[i].head, h->reply_queue[i].busaddr);
+ h->reply_queue[i].head = NULL;
+ h->reply_queue[i].busaddr = 0;
+ }
+}
+
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
hpsa_free_irqs_and_disable_msix(h);
@@ -6693,8 +6737,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
hpsa_free_cmd_pool(h);
kfree(h->ioaccel1_blockFetchTable);
kfree(h->blockFetchTable);
- pci_free_consistent(h->pdev, h->reply_pool_size,
- h->reply_pool, h->reply_pool_dhandle);
+ hpsa_free_reply_queues(h);
if (h->vaddr)
iounmap(h->vaddr);
if (h->transtable)
@@ -6719,16 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
}
}
+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+ int i, cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < num_online_cpus(); i++) {
+ u32 *lockup_detected;
+ lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+ *lockup_detected = value;
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
static void controller_lockup_detected(struct ctlr_info *h)
{
unsigned long flags;
+ u32 lockup_detected;
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_lock_irqsave(&h->lock, flags);
- h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (!lockup_detected) {
+ /* no heartbeat, but controller gave us a zero. */
+ dev_warn(&h->pdev->dev,
+ "lockup detected but scratchpad register is zero\n");
+ lockup_detected = 0xffffffff;
+ }
+ set_lockup_detected_for_all_cpus(h, lockup_detected);
spin_unlock_irqrestore(&h->lock, flags);
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
- h->lockup_detected);
+ lockup_detected);
pci_disable_device(h->pdev);
spin_lock_irqsave(&h->lock, flags);
fail_all_cmds_on_list(h, &h->cmpQ);
@@ -6863,7 +6928,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
struct ctlr_info *h = container_of(to_delayed_work(work),
struct ctlr_info, monitor_ctlr_work);
detect_controller_lockup(h);
- if (h->lockup_detected)
+ if (lockup_detected(h))
return;
if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6913,7 +6978,6 @@ reinit_after_soft_reset:
* the 5 lower bits of the address are used by the hardware. and by
* the driver. See comments in hpsa.h for more info.
*/
-#define COMMANDLIST_ALIGNMENT 128
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@@ -6928,6 +6992,13 @@ reinit_after_soft_reset:
spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
spin_lock_init(&h->passthru_count_lock);
+
+ /* Allocate and clear per-cpu variable lockup_detected */
+ h->lockup_detected = alloc_percpu(u32);
+ if (!h->lockup_detected)
+ goto clean1;
+ set_lockup_detected_for_all_cpus(h, 0);
+
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
@@ -7051,6 +7122,8 @@ clean4:
free_irqs(h);
clean2:
clean1:
+ if (h->lockup_detected)
+ free_percpu(h->lockup_detected);
kfree(h);
return rc;
}
@@ -7059,16 +7132,10 @@ static void hpsa_flush_cache(struct ctlr_info *h)
{
char *flush_buf;
struct CommandList *c;
- unsigned long flags;
/* Don't bother trying to flush the cache if locked up */
- spin_lock_irqsave(&h->lock, flags);
- if (unlikely(h->lockup_detected)) {
- spin_unlock_irqrestore(&h->lock, flags);
+ if (unlikely(lockup_detected(h)))
return;
- }
- spin_unlock_irqrestore(&h->lock, flags);
-
flush_buf = kzalloc(4, GFP_KERNEL);
if (!flush_buf)
return;
@@ -7144,8 +7211,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool, h->errinfo_pool_dhandle);
- pci_free_consistent(h->pdev, h->reply_pool_size,
- h->reply_pool, h->reply_pool_dhandle);
+ hpsa_free_reply_queues(h);
kfree(h->cmd_pool_bits);
kfree(h->blockFetchTable);
kfree(h->ioaccel1_blockFetchTable);
@@ -7153,6 +7219,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
kfree(h->hba_inquiry_data);
pci_disable_device(pdev);
pci_release_regions(pdev);
+ free_percpu(h->lockup_detected);
kfree(h);
}
@@ -7257,8 +7324,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
* 10 = 6 s/g entry or 24k
*/
+ /* If the controller supports either ioaccel method then
+ * we can also use the RAID stack submit path that does not
+ * perform the superfluous readl() after each command submission.
+ */
+ if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
+ access = SA5_performant_access_no_read;
+
/* Controller spec: zero out this buffer. */
- memset(h->reply_pool, 0, h->reply_pool_size);
+ for (i = 0; i < h->nreply_queues; i++)
+ memset(h->reply_queue[i].head, 0, h->reply_queue_size);
bft[7] = SG_ENTRIES_IN_CMD + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -7274,8 +7349,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
for (i = 0; i < h->nreply_queues; i++) {
writel(0, &h->transtable->RepQAddr[i].upper);
- writel(h->reply_pool_dhandle +
- (h->max_commands * sizeof(u64) * i),
+ writel(h->reply_queue[i].busaddr,
&h->transtable->RepQAddr[i].lower);
}
@@ -7323,8 +7397,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
h->ioaccel1_blockFetchTable);
/* initialize all reply queue entries to unused */
- memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
- h->reply_pool_size);
+ for (i = 0; i < h->nreply_queues; i++)
+ memset(h->reply_queue[i].head,
+ (u8) IOACCEL_MODE1_REPLY_UNUSED,
+ h->reply_queue_size);
/* set all the constant fields in the accelerator command
* frames once at init time to save CPU cycles later.
@@ -7386,7 +7462,6 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
* because the 7 lower bits of the address are used by the
* hardware.
*/
-#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
IOACCEL1_COMMANDLIST_ALIGNMENT);
h->ioaccel_cmd_pool =
@@ -7424,7 +7499,6 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
-#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
IOACCEL2_COMMANDLIST_ALIGNMENT);
h->ioaccel2_cmd_pool =
@@ -7482,16 +7556,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
}
}
- /* TODO, check that this next line h->nreply_queues is correct */
h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
- h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
- h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
- &(h->reply_pool_dhandle));
+ h->reply_queue_size = h->max_commands * sizeof(u64);
for (i = 0; i < h->nreply_queues; i++) {
- h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+ h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+ h->reply_queue_size,
+ &(h->reply_queue[i].busaddr));
+ if (!h->reply_queue[i].head)
+ goto clean_up;
h->reply_queue[i].size = h->max_commands;
h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
h->reply_queue[i].current_entry = 0;
@@ -7500,18 +7575,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
-
- if ((h->reply_pool == NULL)
- || (h->blockFetchTable == NULL))
+ if (!h->blockFetchTable)
goto clean_up;
hpsa_enter_performant_mode(h, trans_support);
return;
clean_up:
- if (h->reply_pool)
- pci_free_consistent(h->pdev, h->reply_pool_size,
- h->reply_pool, h->reply_pool_dhandle);
+ hpsa_free_reply_queues(h);
kfree(h->blockFetchTable);
}
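hpsa_free_reply_queues() is called twice above but defined outside this hunk; since every queue now owns its own coherent DMA buffer, the teardown plausibly walks the array and undoes each pci_alloc_consistent(). A sketch under that assumption:

	static void hpsa_free_reply_queues(struct ctlr_info *h)
	{
		int i;

		for (i = 0; i < h->nreply_queues; i++) {
			if (!h->reply_queue[i].head)
				continue;
			pci_free_consistent(h->pdev, h->reply_queue_size,
					    h->reply_queue[i].head,
					    h->reply_queue[i].busaddr);
			h->reply_queue[i].head = NULL;
			h->reply_queue[i].busaddr = 0;
		}
	}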
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 44235a27e1b6..24472cec7de3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -57,11 +57,12 @@ struct hpsa_scsi_dev_t {
};
-struct reply_pool {
+struct reply_queue_buffer {
u64 *head;
size_t size;
u8 wraparound;
u32 current_entry;
+ dma_addr_t busaddr;
};
#pragma pack(1)
@@ -90,6 +91,7 @@ struct bmic_controller_parameters {
u8 automatic_drive_slamming;
u8 reserved1;
u8 nvram_flags;
+#define HBA_MODE_ENABLED_FLAG (1 << 3)
u8 cache_nvram_flags;
u8 drive_config_flags;
u16 reserved2;
@@ -115,11 +117,8 @@ struct ctlr_info {
int nr_cmds; /* Number of commands allowed on this controller */
struct CfgTable __iomem *cfgtable;
int interrupts_enabled;
- int major;
int max_commands;
int commands_outstanding;
- int max_outstanding; /* Debug */
- int usage_count; /* number of opens all all minor devices */
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
@@ -176,11 +175,9 @@ struct ctlr_info {
/*
* Performant mode completion buffers
*/
- u64 *reply_pool;
- size_t reply_pool_size;
- struct reply_pool reply_queue[MAX_REPLY_QUEUES];
+ size_t reply_queue_size;
+ struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
u8 nreply_queues;
- dma_addr_t reply_pool_dhandle;
u32 *blockFetchTable;
u32 *ioaccel1_blockFetchTable;
u32 *ioaccel2_blockFetchTable;
@@ -195,7 +192,7 @@ struct ctlr_info {
u64 last_heartbeat_timestamp;
u32 heartbeat_sample_interval;
atomic_t firmware_flash_in_progress;
- u32 lockup_detected;
+ u32 *lockup_detected;
struct delayed_work monitor_ctlr_work;
int remove_in_progress;
u32 fifo_recently_full;
@@ -232,11 +229,9 @@ struct ctlr_info {
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
#define RESCAN_REQUIRED_EVENT_BITS \
- (CTLR_STATE_CHANGE_EVENT | \
- CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+ (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
- CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
spinlock_t offline_device_lock;
@@ -345,22 +340,23 @@ struct offline_device_entry {
static void SA5_submit_command(struct ctlr_info *h,
struct CommandList *c)
{
- dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
- c->Header.Tag.lower);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
+static void SA5_submit_command_no_read(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+}
+
static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
struct CommandList *c)
{
- dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
- c->Header.Tag.lower);
if (c->cmd_type == CMD_IOACCEL2)
writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
else
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
/*
@@ -398,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
- struct reply_pool *rq = &h->reply_queue[q];
+ struct reply_queue_buffer *rq = &h->reply_queue[q];
unsigned long flags, register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */
@@ -477,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h)
{
unsigned long register_value =
readl(h->vaddr + SA5_INTR_STATUS);
- dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
return register_value & SA5_INTR_PENDING;
}
@@ -514,7 +509,7 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
u64 register_value;
- struct reply_pool *rq = &h->reply_queue[q];
+ struct reply_queue_buffer *rq = &h->reply_queue[q];
unsigned long flags;
BUG_ON(q >= h->nreply_queues);
@@ -572,6 +567,14 @@ static struct access_method SA5_performant_access = {
SA5_performant_completed,
};
+static struct access_method SA5_performant_access_no_read = {
+ SA5_submit_command_no_read,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
struct board_type {
u32 board_id;
char *product_name;
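Commands reach these routines through the controller's access_method table, so installing SA5_performant_access_no_read at mode-transition time switches every submission to the flush-free variant without touching any caller. Usage sketch (assuming the access table is dispatched as elsewhere in this driver):

	/* Submission always goes through the table: */
	h->access.submit_command(h, c);
	/* With the no_read table installed this resolves to
	 * SA5_submit_command_no_read(): a writel() with no posted-write
	 * flush readl() behind it; completion is learned from the reply
	 * queue rather than by reading a register back.
	 */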
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index b5cc7052339f..b5125dc31439 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -151,7 +151,7 @@
#define HPSA_VPD_HEADER_SZ 4
/* Logical volume states */
-#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
#define HPSA_LV_OK 0x0
#define HPSA_LV_UNDERGOING_ERASE 0x0F
#define HPSA_LV_UNDERGOING_RPI 0x12
@@ -238,11 +238,21 @@ struct ReportLUNdata {
u8 LUN[HPSA_MAX_LUN][8];
};
+struct ext_report_lun_entry {
+ u8 lunid[8];
+ u8 wwid[8];
+ u8 device_type;
+ u8 device_flags;
+ u8 lun_count; /* multi-lun device, how many luns */
+ u8 redundant_paths;
+ u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
+};
+
struct ReportExtendedLUNdata {
u8 LUNListLength[4];
u8 extended_response_flag;
u8 reserved[3];
- u8 LUN[HPSA_MAX_LUN][24];
+ struct ext_report_lun_entry LUN[HPSA_MAX_LUN];
};
struct SenseSubsystem_info {
@@ -375,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */
* or a bus address.
*/
+#define COMMANDLIST_ALIGNMENT 128
struct CommandList {
struct CommandListHeader Header;
struct RequestBlock Request;
@@ -389,21 +400,7 @@ struct CommandList {
struct list_head list;
struct completion *waiting;
void *scsi_cmd;
-
-/* on 64 bit architectures, to get this to be 32-byte-aligned
- * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
- * we need PAD_32 bytes of padding (see below). This does that.
- * If it happens that 64 bit and 32 bit systems need different
- * padding, PAD_32 and PAD_64 can be set independently, and.
- * the code below will do the right thing.
- */
-#define IS_32_BIT ((8 - sizeof(long))/4)
-#define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (40)
-#define PAD_64 (12)
-#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
- u8 pad[COMMANDLIST_PAD];
-};
+} __aligned(COMMANDLIST_ALIGNMENT);
/* Max S/G elements in I/O accelerator command */
#define IOACCEL1_MAXSGENTRIES 24
@@ -413,6 +410,7 @@ struct CommandList {
* Structure for I/O accelerator (mode 1) commands.
* Note that this structure must be 128-byte aligned in size.
*/
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
struct io_accel1_cmd {
u16 dev_handle; /* 0x00 - 0x01 */
u8 reserved1; /* 0x02 */
@@ -440,12 +438,7 @@ struct io_accel1_cmd {
struct vals32 host_addr; /* 0x70 - 0x77 */
u8 CISS_LUN[8]; /* 0x78 - 0x7F */
struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-#define IOACCEL1_PAD_64 0
-#define IOACCEL1_PAD_32 0
-#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
- IS_64_BIT * IOACCEL1_PAD_64)
- u8 pad[IOACCEL1_PAD];
-};
+} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
#define IOACCEL1_FUNCTION_SCSIIO 0x00
#define IOACCEL1_SGLOFFSET 32
@@ -510,14 +503,11 @@ struct io_accel2_scsi_response {
u8 sense_data_buff[32]; /* sense/response data buffer */
};
-#define IOACCEL2_64_PAD 76
-#define IOACCEL2_32_PAD 76
-#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
- IS_64_BIT * IOACCEL2_64_PAD)
/*
* Structure for I/O accelerator (mode 2 or m2) commands.
* Note that this structure must be 128-byte aligned in size.
*/
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
struct io_accel2_cmd {
u8 IU_type; /* IU Type */
u8 direction; /* direction, memtype, and encryption */
@@ -544,8 +534,7 @@ struct io_accel2_cmd {
u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
struct io_accel2_scsi_response error_data;
- u8 pad[IOACCEL2_PAD];
-};
+} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
/*
* defines for Mode 2 command struct
@@ -636,7 +625,7 @@ struct TransTable_struct {
u32 RepQCount;
u32 RepQCtrAddrLow32;
u32 RepQCtrAddrHigh32;
-#define MAX_REPLY_QUEUES 8
+#define MAX_REPLY_QUEUES 64
struct vals32 RepQAddr[MAX_REPLY_QUEUES];
};
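Replacing the hand-computed pad arrays with __aligned() delegates the 128-byte size rounding to the compiler, and the BUILD_BUG_ON()s earlier in the patch keep that guarantee checked at build time. A minimal sketch of the idiom (names hypothetical):

	#define EXAMPLE_ALIGNMENT 128

	struct example_cmd {
		u8 payload[200];		/* arbitrary contents */
	} __aligned(EXAMPLE_ALIGNMENT);		/* sizeof rounds up to 256 */

	static void example_check(void)
	{
		/* fails the build if the padding guarantee ever breaks */
		BUILD_BUG_ON(sizeof(struct example_cmd) % EXAMPLE_ALIGNMENT);
	}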
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 2ebfb2bb0f42..7b23f21f22f1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
+
+ /* Ensure the read of the valid bit occurs before reading any
+ * other bits of the CRQ entry
+ */
+ rmb();
} else
crq = NULL;
spin_unlock_irqrestore(&queue->lock, flags);
@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
{
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ /*
+ * Ensure the command buffer is flushed to memory before handing it
+ * over to the VIOS to prevent it from fetching any stale data.
+ */
+ mb();
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
evt->hostdata->dev);
if (evt->cmnd_done)
evt->cmnd_done(evt->cmnd);
- } else if (evt->done)
+ } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
+ evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
evt->done(evt);
free_event_struct(&evt->hostdata->pool, evt);
spin_lock_irqsave(hostdata->host->host_lock, flags);
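The two barriers pair with the hypervisor's publish order: the VIOS writes the CRQ entry body before flipping the valid bit, and conversely must see a fully written command buffer before the H_SEND_CRQ hcall lands. Consumer-side sketch (handle_crq is a hypothetical handler):

	if (crq->valid & 0x80) {	/* producer sets this last */
		rmb();		/* forbid speculative reads of the entry
				 * body ahead of the valid-bit check */
		handle_crq(queue, crq);
	}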
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 11854845393b..a669f2d11c31 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -244,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
sk->sk_data_ready = tcp_sw_conn->old_data_ready;
sk->sk_state_change = tcp_sw_conn->old_state_change;
sk->sk_write_space = tcp_sw_conn->old_write_space;
- sk->sk_no_check = 0;
+ sk->sk_no_check_tx = 0;
write_unlock_bh(&sk->sk_callback_lock);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 26dc005bb0f0..3d1bc67bac9d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
struct iscsi_scsi_req *hdr;
- unsigned hdrlength, cmd_len;
+ unsigned hdrlength, cmd_len, transfer_length;
itt_t itt;
int rc;
@@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
task->protected = true;
+ transfer_length = scsi_transfer_length(sc);
+ hdr->data_length = cpu_to_be32(transfer_length);
if (sc->sc_data_direction == DMA_TO_DEVICE) {
- unsigned out_len = scsi_out(sc)->length;
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
- hdr->data_length = cpu_to_be32(out_len);
hdr->flags |= ISCSI_FLAG_CMD_WRITE;
/*
* Write counters:
@@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
memset(r2t, 0, sizeof(*r2t));
if (session->imm_data_en) {
- if (out_len >= session->first_burst)
+ if (transfer_length >= session->first_burst)
task->imm_count = min(session->first_burst,
conn->max_xmit_dlength);
else
- task->imm_count = min(out_len,
- conn->max_xmit_dlength);
+ task->imm_count = min(transfer_length,
+ conn->max_xmit_dlength);
hton24(hdr->dlength, task->imm_count);
} else
zero_data(hdr->dlength);
if (!session->initial_r2t_en) {
- r2t->data_length = min(session->first_burst, out_len) -
+ r2t->data_length = min(session->first_burst,
+ transfer_length) -
task->imm_count;
r2t->data_offset = task->imm_count;
r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
@@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
} else {
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
zero_data(hdr->dlength);
- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
if (sc->sc_data_direction == DMA_FROM_DEVICE)
hdr->flags |= ISCSI_FLAG_CMD_READ;
@@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
scsi_bidi_cmnd(sc) ? "bidirectional" :
sc->sc_data_direction == DMA_TO_DEVICE ?
"write" : "read", conn->id, sc, sc->cmnd[0],
- task->itt, scsi_bufflen(sc),
+ task->itt, transfer_length,
scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
session->cmdsn,
session->max_cmdsn - session->exp_cmdsn + 1);
@@ -1442,9 +1442,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
conn->task = NULL;
}
/* regular RX path uses back_lock */
- spin_lock_bh(&conn->session->back_lock);
+ spin_lock(&conn->session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&conn->session->back_lock);
+ spin_unlock(&conn->session->back_lock);
return rc;
}
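scsi_transfer_length() differs from scsi_bufflen() when T10 protection information travels on the wire: each logical block then carries an extra 8-byte PI tuple that the iSCSI expected-data-length field must cover. A rough sketch of the accounting, ignoring the strip/insert cases the real helper distinguishes:

	unsigned int xfer = scsi_bufflen(sc);

	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
		/* one 8-byte PI tuple per logical block on the wire */
		xfer += (xfer >> ilog2(sc->device->sector_size)) * 8;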
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 94a3cafe7197..434e9037908e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -640,6 +640,7 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
+#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8d5b6ceec9c9..1d7a5c34ee8c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -919,10 +919,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
phba->cfg_sriov_nr_virtfn = 0;
}
+ if (opcode == LPFC_FW_DUMP)
+ phba->hba_flag |= HBA_FW_DUMP_OP;
+
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (status != 0)
+ if (status != 0) {
+ phba->hba_flag &= ~HBA_FW_DUMP_OP;
return status;
+ }
/* wait for the device to be quiesced before firmware reset */
msleep(100);
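HBA_FW_DUMP_OP is consumed later in this series by lpfc_sli4_brdreset() (see the lpfc_sli.c hunk below), which clears the flag and returns before the PCI function reset so the dump image held by the port is not destroyed. The lifecycle, sketched:

	phba->hba_flag |= HBA_FW_DUMP_OP;	/* request: skip fn reset */
	if (lpfc_do_offline(phba, LPFC_EVT_OFFLINE))
		phba->hba_flag &= ~HBA_FW_DUMP_OP; /* offline failed: undo */
	/* ...otherwise lpfc_sli4_brdreset() clears it on the reset path */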
@@ -2364,7 +2369,7 @@ lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
uint8_t wwpn[WWN_SZ];
int rc;
- if (!phba->cfg_EnableXLane)
+ if (!phba->cfg_fof)
return -EPERM;
/* count may include a LF at end of string */
@@ -2432,7 +2437,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
uint8_t wwpn[WWN_SZ];
int rc;
- if (!phba->cfg_EnableXLane)
+ if (!phba->cfg_fof)
return -EPERM;
/* count may include a LF at end of string */
@@ -2499,7 +2504,7 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int val = 0;
- if (!phba->cfg_EnableXLane)
+ if (!phba->cfg_fof)
return -EPERM;
if (!isdigit(buf[0]))
@@ -2565,7 +2570,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
int rc = 0;
- if (!phba->cfg_EnableXLane)
+ if (!phba->cfg_fof)
return -EPERM;
if (oas_state) {
@@ -2670,7 +2675,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
uint64_t oas_lun;
int len = 0;
- if (!phba->cfg_EnableXLane)
+ if (!phba->cfg_fof)
return -EPERM;
if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
@@ -2716,7 +2721,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
uint64_t scsi_lun;
ssize_t rc;
- if (!phba->cfg_EnableXLane)
+ if (!phba->cfg_fof)
return -EPERM;
if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
@@ -4655,7 +4660,7 @@ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
# Value range is [0x0,0x7f]. Default value is 0
*/
-LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
/*
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index ca2f4ea7cdef..5b5c825d9576 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index a94d4c9dfaa5..928ef609f363 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2010-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2010-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index adda0bf7a244..db5604f01a1a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -289,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -310,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
uint64_t, lpfc_ctx_cmd);
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+ uint16_t, uint64_t, lpfc_ctx_cmd);
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 828c08e9389e..b0aedce3f54b 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -2314,7 +2314,7 @@ proc_cq:
goto too_big;
}
- if (phba->cfg_EnableXLane) {
+ if (phba->cfg_fof) {
/* OAS CQ */
qp = phba->sli4_hba.oas_cq;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 624fe0b3cc0b..7a5d81a65be8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 294c072e9083..2a17e31265b8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -5634,6 +5634,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
GFP_KERNEL);
+ if (ndlp->active_rrqs_xri_bitmap)
+ memset(ndlp->active_rrqs_xri_bitmap, 0,
+ ndlp->phba->cfg_rrq_xri_bitmap_sz);
}
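mempool_alloc(), unlike kzalloc(), can hand back a previously used element with its old contents intact, so the bitmap must be cleared explicitly or stale XRI bits would leak into the new node. The guard in isolation:

	bitmap = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL);
	if (bitmap)				/* pool memory is recycled, */
		memset(bitmap, 0, bitmap_sz);	/* never implicitly zeroed  */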
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3d9438ce59ab..236259252379 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index fd79f7de7666..f432ec180cf8 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 635eeb3d6987..06f9a5b79e66 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -820,57 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
}
/**
- * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
+ * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
+ * rspiocb which got deferred
+ *
* @phba: pointer to lpfc HBA data structure.
*
- * This routine will do uninitialization after the HBA is reset when bring
- * down the SLI Layer.
+ * This routine will cleanup completed slow path events after HBA is reset
+ * when bringing down the SLI Layer.
+ *
*
* Return codes
- * 0 - success.
- * Any other value - error.
+ * void.
**/
-static int
-lpfc_hba_down_post_s3(struct lpfc_hba *phba)
+static void
+lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *rspiocbq;
+ struct hbq_dmabuf *dmabuf;
+ struct lpfc_cq_event *cq_event;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+ /* Get the response iocb from the head of work queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_queue_event,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+
+ switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+ case CQE_CODE_COMPL_WQE:
+ rspiocbq = container_of(cq_event, struct lpfc_iocbq,
+ cq_event);
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ break;
+ case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
+ dmabuf = container_of(cq_event, struct hbq_dmabuf,
+ cq_event);
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ }
+ }
+}
+
+/**
+ * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup posted ELS buffers after the HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ * void.
+ **/
+static void
+lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *mp, *next_mp;
- LIST_HEAD(completions);
- int i;
+ LIST_HEAD(buflist);
+ int count;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_hbqbuf_free_all(phba);
else {
/* Cleanup preposted buffers on the ELS ring */
pring = &psli->ring[LPFC_ELS_RING];
- list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&pring->postbufq, &buflist);
+ spin_unlock_irq(&phba->hbalock);
+
+ count = 0;
+ list_for_each_entry_safe(mp, next_mp, &buflist, list) {
list_del(&mp->list);
- pring->postbufq_cnt--;
+ count++;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+
+ spin_lock_irq(&phba->hbalock);
+ pring->postbufq_cnt -= count;
+ spin_unlock_irq(&phba->hbalock);
}
+}
+
+/**
+ * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup the txcmplq after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ * void
+ **/
+static void
+lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ LIST_HEAD(completions);
+ int i;
- spin_lock_irq(&phba->hbalock);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
-
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ spin_lock_irq(&pring->ring_lock);
+ else
+ spin_lock_irq(&phba->hbalock);
/* At this point in time the HBA is either reset or DOA. Either
* way, nothing should be on txcmplq as it will NEVER complete.
*/
list_splice_init(&pring->txcmplq, &completions);
- spin_unlock_irq(&phba->hbalock);
+ pring->txcmplq_cnt = 0;
+
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ spin_unlock_irq(&pring->ring_lock);
+ else
+ spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
-
lpfc_sli_abort_iocb_ring(phba, pring);
- spin_lock_irq(&phba->hbalock);
}
- spin_unlock_irq(&phba->hbalock);
+}
+/**
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bring
+ * down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
+{
+ lpfc_hba_free_post_buf(phba);
+ lpfc_hba_clean_txcmplq(phba);
return 0;
}
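The preposted-buffer cleanup above also switches to the splice-then-free idiom, so mbuf freeing no longer runs under hbalock; only the list steal and the counter reconciliation are locked. The pattern in isolation:

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->postbufq, &buflist);	/* O(1) list steal */
	spin_unlock_irq(&phba->hbalock);
	/* walk buflist and free each entry with no lock held, counting
	 * as we go, then re-take the lock only to apply
	 * pring->postbufq_cnt -= count */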
@@ -890,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *psb, *psb_next;
LIST_HEAD(aborts);
- int ret;
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
- ret = lpfc_hba_down_post_s3(phba);
- if (ret)
- return ret;
+ lpfc_hba_free_post_buf(phba);
+ lpfc_hba_clean_txcmplq(phba);
+
/* At this point in time the HBA is either reset or DOA. Either
* way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
* on the lpfc_sgl_list so that it can either be freed if the
@@ -932,6 +1027,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+
+ lpfc_sli4_free_sp_events(phba);
return 0;
}
@@ -1250,7 +1347,6 @@ static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
uint32_t old_host_status = phba->work_hs;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli = &phba->sli;
/* If the pci channel is offline, ignore possible errors,
@@ -1279,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
* dropped by the firmware. Error iocb (I/O) on txcmplq and let the
* SCSI layer retry it after re-establishing link.
*/
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
/*
* There was a firmware error. Take the hba offline and then
@@ -1348,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
uint32_t event_data;
unsigned long temperature;
struct temp_event temp_event_data;
@@ -1400,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* Error iocb (I/O) on txcmplq and let the SCSI layer
* retry it after re-establishing link.
*/
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
/*
* There was a firmware error. Take the hba offline and then
@@ -1940,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
switch (dev_id) {
case PCI_DEVICE_ID_FIREFLY:
- m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP6000", "PCI",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_SUPERFLY:
if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
- m = (typeof(m)){"LP7000", "PCI",
- "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP7000", "PCI", ""};
else
- m = (typeof(m)){"LP7000E", "PCI",
- "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP7000E", "PCI", ""};
+ m.function = "Obsolete, Unsupported Fibre Channel Adapter";
break;
case PCI_DEVICE_ID_DRAGONFLY:
m = (typeof(m)){"LP8000", "PCI",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_CENTAUR:
if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
- m = (typeof(m)){"LP9002", "PCI",
- "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP9002", "PCI", ""};
else
- m = (typeof(m)){"LP9000", "PCI",
- "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP9000", "PCI", ""};
+ m.function = "Obsolete, Unsupported Fibre Channel Adapter";
break;
case PCI_DEVICE_ID_RFLY:
m = (typeof(m)){"LP952", "PCI",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_PEGASUS:
m = (typeof(m)){"LP9802", "PCI-X",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_THOR:
m = (typeof(m)){"LP10000", "PCI-X",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_VIPER:
m = (typeof(m)){"LPX1000", "PCI-X",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_PFLY:
m = (typeof(m)){"LP982", "PCI-X",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_TFLY:
m = (typeof(m)){"LP1050", "PCI-X",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_HELIOS:
m = (typeof(m)){"LP11000", "PCI-X2",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_HELIOS_SCSP:
m = (typeof(m)){"LP11000-SP", "PCI-X2",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_HELIOS_DCSP:
m = (typeof(m)){"LP11002-SP", "PCI-X2",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_NEPTUNE:
- m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LPe1000", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_NEPTUNE_SCSP:
- m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LPe1000-SP", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_NEPTUNE_DCSP:
- m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LPe1002-SP", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_BMID:
m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_BSMB:
- m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP111", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_ZEPHYR:
m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
@@ -2030,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LP101:
- m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP101", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LP10000S:
- m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP10000-S", "PCI",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LP11000S:
- m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LP11000-S", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LPE11000S:
- m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
+ m = (typeof(m)){"LPe11000-S", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_SAT:
m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
@@ -2060,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
+ m = (typeof(m)){"LP21000", "PCIe",
+ "Obsolete, Unsupported FCoE Adapter"};
GE = 1;
break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_PROTEUS_PF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_PROTEUS_S:
m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_TIGERSHARK:
oneConnect = 1;
@@ -2089,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
break;
case PCI_DEVICE_ID_BALIUS:
m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
- "Fibre Channel Adapter"};
+ "Obsolete, Unsupported Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FC:
- case PCI_DEVICE_ID_LANCER_FC_VF:
m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
break;
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
case PCI_DEVICE_ID_LANCER_FCOE:
- case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
break;
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe15100", "PCIe",
+ "Obsolete, Unsupported FCoE"};
+ break;
case PCI_DEVICE_ID_SKYHAWK:
case PCI_DEVICE_ID_SKYHAWK_VF:
oneConnect = 1;
@@ -4614,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
return;
}
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ else
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
@@ -9663,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
-
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2723 PCI channel I/O abort preparing for recovery\n");
@@ -9673,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
* There may be errored I/Os through HBA, abort all I/Os on txcmplq
* and let the SCSI mid-layer to retry them to recover.
*/
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
}
/**
@@ -10417,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
-
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2828 PCI channel I/O abort preparing for recovery\n");
/*
* There may be errored I/Os through HBA, abort all I/Os on txcmplq
* and let the SCSI mid-layer to retry them to recover.
*/
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
}
/**
@@ -10898,7 +11001,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
if (phba->sli4_hba.pc_sli4_params.oas_supported) {
phba->cfg_fof = 1;
} else {
- phba->cfg_EnableXLane = 0;
+ phba->cfg_fof = 0;
if (phba->device_data_mem_pool)
mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
@@ -10928,7 +11031,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
if (rc)
return -ENOMEM;
- if (phba->cfg_EnableXLane) {
+ if (phba->cfg_fof) {
rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
@@ -10947,8 +11050,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
return 0;
out_oas_wq:
- if (phba->cfg_EnableXLane)
- lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+ lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
return rc;
@@ -10982,7 +11084,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fof_eq = qdesc;
- if (phba->cfg_EnableXLane) {
+ if (phba->cfg_fof) {
/* Create OAS CQ */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ed419aad2b1f..3fa65338d3f5 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 462453ee0bda..2df11daad85b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -73,7 +73,7 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
- if (vport->phba->cfg_EnableXLane)
+ if (vport->phba->cfg_fof)
return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
else
return (struct lpfc_rport_data *)sdev->hostdata;
@@ -3462,7 +3462,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* If the OAS driver feature is enabled and the lun is enabled for
* OAS, set the oas iocb related flags.
*/
- if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+ if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled)
lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
return 0;
@@ -4314,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl1 = SIMPLE_Q;
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+ piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -4782,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
- unsigned long flags;
+ struct lpfc_sli_ring *pring_s4;
+ int ring_number, ret_val;
+ unsigned long flags, iflags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
status = fc_block_scsi_eh(cmnd);
@@ -4833,6 +4836,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
BUG_ON(iocb->context1 != lpfc_cmd);
+ /* abort issued in recovery is still in progress */
+ if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3389 SCSI Layer I/O Abort Request is pending\n");
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto wait_for_cmpl;
+ }
+
abtsiocb = __lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
ret = FAILED;
@@ -4871,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
+ pring_s4 = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocb, 0);
+ spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ } else {
+ ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+ abtsiocb, 0);
+ }
/* no longer need the lock after this point */
spin_unlock_irqrestore(&phba->hbalock, flags);
- if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
- IOCB_ERROR) {
+
+ if (ret_val == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
@@ -4885,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+wait_for_cmpl:
lpfc_cmd->waitq = &waitq;
/* Wait for abort to complete */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+
+ spin_lock_irqsave(shost->host_lock, flags);
lpfc_cmd->waitq = NULL;
+ spin_unlock_irqrestore(shost->host_lock, flags);
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
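Taking host_lock around clearing waitq closes a race with the completion path, which (elsewhere in lpfc_scsi.c) wakes the waiter under the same lock; without it, a late wake-up could dereference the on-stack wait queue after the handler returned. The pairing, sketched under that assumption:

	/* completion side (assumed shape): */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);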
@@ -5172,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
if (cnt)
- lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
- tgt_id, lun_id, context);
+ lpfc_sli_abort_taskmgmt(vport,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ tgt_id, lun_id, context);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
@@ -5491,7 +5519,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- if (phba->cfg_EnableXLane) {
+ if (phba->cfg_fof) {
/*
* Check to see if the device data structure for the lun
@@ -5616,7 +5644,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
struct lpfc_device_data *device_data = sdev->hostdata;
atomic_dec(&phba->sdev_cnt);
- if ((phba->cfg_EnableXLane) && (device_data)) {
+ if ((phba->cfg_fof) && (device_data)) {
spin_lock_irqsave(&phba->devicelock, flags);
device_data->available = false;
if (!device_data->oas_enabled)
@@ -5655,7 +5683,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
int memory_flags;
if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
- !(phba->cfg_EnableXLane))
+ !(phba->cfg_fof))
return NULL;
/* Attempt to create the device data to contain lun info */
@@ -5693,7 +5721,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
{
if (unlikely(!phba) || !lun_info ||
- !(phba->cfg_EnableXLane))
+ !(phba->cfg_fof))
return;
if (!list_empty(&lun_info->listentry))
@@ -5727,7 +5755,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
struct lpfc_device_data *lun_info;
if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
- !phba->cfg_EnableXLane)
+ !phba->cfg_fof)
return NULL;
/* Check to see if the lun is already enabled for OAS. */
@@ -5789,7 +5817,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
!starting_lun || !found_vport_wwpn ||
!found_target_wwpn || !found_lun || !found_lun_status ||
(*starting_lun == NO_MORE_OAS_LUN) ||
- !phba->cfg_EnableXLane)
+ !phba->cfg_fof)
return false;
lun = *starting_lun;
@@ -5873,7 +5901,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
unsigned long flags;
if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
- !phba->cfg_EnableXLane)
+ !phba->cfg_fof)
return false;
spin_lock_irqsave(&phba->devicelock, flags);
@@ -5930,7 +5958,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
unsigned long flags;
if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
- !phba->cfg_EnableXLane)
+ !phba->cfg_fof)
return false;
spin_lock_irqsave(&phba->devicelock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0120bfccf50b..0389ac1e7b83 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6bb51f8e3c1b..32ada0505576 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
return NULL;
q->hba_index = idx;
+
+ /*
+	 * insert barrier for instruction interlock: data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+ * instructions allowing action on content before valid bit checked,
+ * add barrier here as well. May not be needed as "content" is a
+ * single 32-bit entity here (vs multi word structure for cq's).
+ */
+ mb();
return eqe;
}
@@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
cqe = q->qe[q->hba_index].cqe;
q->hba_index = idx;
+
+ /*
+	 * insert barrier for instruction interlock: data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Speculative instructions were allowing a bcopy at the start
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+ * after our return, to copy data before the valid bit check above
+ * was done. As such, some of the copied data was stale. The barrier
+ * ensures the check is before any data is copied.
+ */
+ mb();
return cqe;
}
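Both getters now order the valid-bit check ahead of any copy of the entry; without the fence the CPU may speculatively load entry words before the check and hand stale data to lpfc_sli4_fp_handle_wcqe(). The required sequence, sketched with hypothetical local names:

	if (!bf_get_le32(lpfc_cqe_valid, qe))	/* 1: check valid bit   */
		return NULL;
	mb();					/* 2: full fence        */
	memcpy(&local_cqe, qe, sizeof(local_cqe)); /* 3: only now copy  */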
@@ -3511,14 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/* Error everything on txq and txcmplq
* First do the txq.
*/
- spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_lock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
- spin_unlock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
@@ -3526,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in FCP rings and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+}
+
+
+/**
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
* @phba: Pointer to HBA context object.
*
@@ -3542,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
LIST_HEAD(txcmplq);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
-
- /* Currently, only one fcp ring */
- pring = &psli->ring[psli->fcp_ring];
+ uint32_t i;
spin_lock_irq(&phba->hbalock);
- /* Retrieve everything on txq */
- list_splice_init(&pring->txq, &txq);
-
- /* Retrieve everything on the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq);
-
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
- /* Flush the txq */
- lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+
+ spin_lock_irq(&pring->ring_lock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+			/* Flush the txcmplq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
- /* Flush the txcmpq */
- lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
+ spin_lock_irq(&phba->hbalock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+		/* Flush the txcmplq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
}
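Every routine touched in this file repeats the same lock-selection idiom: SLI4 rings each carry their own ring_lock, while everything older still serializes on the single hbalock. Distilled:

	if (phba->sli_rev >= LPFC_SLI_REV4)
		spin_lock_irq(&pring->ring_lock);   /* per-ring, SLI4   */
	else
		spin_lock_irq(&phba->hbalock);      /* global, pre-SLI4 */
	/* ... txq/txcmplq manipulation ... */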
/**
@@ -3966,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
- int rc;
+ int rc = 0;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0295 Reset HBA Data: x%x x%x\n",
- phba->pport->port_state, psli->sli_flag);
+ "0295 Reset HBA Data: x%x x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag,
+ phba->hba_flag);
/* perform board reset */
phba->fc_eventTag = 0;
@@ -3984,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
+ /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
+ if (phba->hba_flag & HBA_FW_DUMP_OP) {
+ phba->hba_flag &= ~HBA_FW_DUMP_OP;
+ return rc;
+ }
+
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -4981,7 +5079,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
- if (phba->cfg_EnableXLane)
+ if (phba->cfg_fof)
lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
if (phba->sli4_hba.hba_eq) {
@@ -6701,7 +6799,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
MAILBOX_t *mb = &pmbox->u.mb;
struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
/* If the mailbox completed, process the completion and return */
if (lpfc_sli4_process_missed_mbox_completions(phba))
@@ -6743,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ lpfc_sli_abort_fcp_rings(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0345 Resetting board due to mailbox timeout\n");
@@ -8112,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word10 = 0;
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8618,8 +8715,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
- LPFC_IO_OAS))) {
+ if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
} else {
wq = phba->sli4_hba.oas_wq;
@@ -8714,7 +8810,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ if (!phba->cfg_fof || (!(piocb->iocb_flag &
LPFC_IO_OAS))) {
if (unlikely(!phba->sli4_hba.fcp_wq))
return IOCB_ERROR;
@@ -9149,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
pring->sli.sli3.next_cmdidx = 0;
pring->sli.sli3.local_getidx = 0;
pring->sli.sli3.cmdidx = 0;
+ pring->flag = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -9784,43 +9881,6 @@ abort_iotag_exit:
}
/**
- * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues abort iocbs unconditionally for all
- * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
- * to complete before the return of this function. The caller is not required
- * to hold any locks.
- **/
-static void
-lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
- LIST_HEAD(completions);
- struct lpfc_iocbq *iocb, *next_iocb;
-
- if (pring->ringno == LPFC_ELS_RING)
- lpfc_fabric_abort_hba(phba);
-
- spin_lock_irq(&phba->hbalock);
-
- /* Take off all the iocbs on txq for cancelling */
- list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
-
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_abort_iotag_issue(phba, pring, iocb);
-
- spin_unlock_irq(&phba->hbalock);
-
- /* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
-}
-
-/**
* lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
* @phba: pointer to lpfc HBA data structure.
*
@@ -9835,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- lpfc_sli_iocb_ring_abort(phba, pring);
+ lpfc_sli_abort_iocb_ring(phba, pring);
}
}
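
The deleted lpfc_sli_iocb_ring_abort duplicated lpfc_sli_abort_iocb_ring, so lpfc_sli_hba_iocb_abort now calls the shared helper. Per the removed function's own doc comment, both follow the same idiom: under the lock, splice the pending txq onto a private list and issue aborts for in-flight entries, then complete the spliced entries after dropping the lock. A minimal userspace sketch of that splice-then-cancel pattern, using pthreads and a hypothetical node type:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int tag; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *txq;             /* queued, not yet issued */

static void cancel_all(void)
{
    struct node *local, *n;

    pthread_mutex_lock(&lock);
    local = txq;                     /* splice the whole queue... */
    txq = NULL;                      /* ...leaving it empty */
    pthread_mutex_unlock(&lock);

    while ((n = local) != NULL) {    /* complete outside the lock */
        local = n->next;
        printf("cancelled iocb tag %d\n", n->tag);
        free(n);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->tag = i;
        n->next = txq;
        txq = n;
    }
    cancel_all();
    return 0;
}
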
@@ -10060,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
}
/**
+ * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
+ * parameters.
+ * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
+ * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with the virtual port.
+ * This function returns the number of iocbs it aborted.
+ * This function is called with no locks held right after a taskmgmt
+ * command is sent.
+ **/
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *abtsiocbq;
+ struct lpfc_iocbq *iocbq;
+ IOCB_t *icmd;
+ int sum, i, ret_val;
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring_s4;
+ uint32_t ring_number;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* all I/Os are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+ sum = 0;
+
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ cmd) != 0)
+ continue;
+
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
+ /* issue ABTS for this IOCB based on iotag */
+ abtsiocbq = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocbq == NULL)
+ continue;
+
+ icmd = &iocbq->iocb;
+ abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+ abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ abtsiocbq->iocb.un.acxri.abortIoTag =
+ iocbq->sli4_xritag;
+ else
+ abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
+ abtsiocbq->iocb.ulpLe = 1;
+ abtsiocbq->iocb.ulpClass = icmd->ulpClass;
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+ if (lpfc_is_link_up(phba))
+ abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+ /* Setup callback routine and issue the command. */
+ abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+
+ /*
+ * Indicate the IO is being aborted by the driver and set
+ * the caller's flag into the aborted IO.
+ */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = MAX_SLI3_CONFIGURED_RINGS +
+ iocbq->fcp_wqidx;
+ pring_s4 = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocbq, 0);
+ spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ } else {
+ ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbq, 0);
+ }
+
+ if (ret_val == IOCB_ERROR)
+ __lpfc_sli_release_iocbq(phba, abtsiocbq);
+ else
+ sum++;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return sum;
+}
+
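
In the new lpfc_sli_abort_taskmgmt above, the SLI4 path issues each ABTS under the per-WQ ring_lock while hbalock is already held; the in-code note "both hbalock and ring_lock must be set here" encodes a fixed outer-then-inner acquisition order. A toy illustration of that ordering discipline (lock names invented, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hba_lock  = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

static void issue_on_ring(int tag)
{
    /* Always entered with hba_lock held: outer before inner, never
     * the reverse, so no abort path can deadlock against another. */
    pthread_mutex_lock(&ring_lock);
    printf("issued abort for tag %d\n", tag);
    pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
    pthread_mutex_lock(&hba_lock);
    for (int tag = 1; tag <= 3; tag++)
        issue_on_ring(tag);
    pthread_mutex_unlock(&hba_lock);
    return 0;
}
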
+/**
* lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6f04080f4ea8..edb48832c39b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 9b8cda866176..7f50aa04d66a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e32cbec70324..41675c1193e7 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.45"
+#define LPFC_DRIVER_VERSION "10.2.8001.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex. All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index f5cdc68cd5b6..6a039eb1cbce 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -25,10 +25,6 @@
* 1+ (800) 334-5454
*/
-/*
- * $Log: mac_NCR5380.c,v $
- */
-
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/ctype.h>
@@ -58,12 +54,6 @@
#include "NCR5380.h"
-#if 0
-#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION)
-#else
-#define NDEBUG (NDEBUG_ABORT)
-#endif
-
#define RESET_BOOT
#define DRIVER_SETUP
diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h
index 7dc62fce1c4c..06969b06e54b 100644
--- a/drivers/scsi/mac_scsi.h
+++ b/drivers/scsi/mac_scsi.h
@@ -22,10 +22,6 @@
* 1+ (800) 334-5454
*/
-/*
- * $Log: cumana_NCR5380.h,v $
- */
-
#ifndef MAC_NCR5380_H
#define MAC_NCR5380_H
@@ -51,8 +47,6 @@
#include <scsi/scsicam.h>
-#ifndef HOSTS_C
-
#define NCR5380_implementation_fields \
int port, ctrl
@@ -75,10 +69,6 @@
#define NCR5380_show_info macscsi_show_info
#define NCR5380_write_info macscsi_write_info
-#define BOARD_NORMAL 0
-#define BOARD_NCR53C400 1
-
-#endif /* ndef HOSTS_C */
#endif /* ndef ASM */
#endif /* MAC_NCR5380_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d84d02c2aad9..112799b131a9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3061,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
u32 cur_state;
u32 abs_state, curr_abs_state;
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+ fw_state = abs_state & MFI_STATE_MASK;
if (fw_state != MFI_STATE_READY)
printk(KERN_INFO "megasas: Waiting for FW to come to ready"
@@ -3069,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
while (fw_state != MFI_STATE_READY) {
- abs_state =
- instance->instancet->read_fw_status_reg(instance->reg_set);
-
switch (fw_state) {
case MFI_STATE_FAULT:
@@ -3223,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
* The cur_state should not last for more than max_wait secs
*/
for (i = 0; i < (max_wait * 1000); i++) {
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
- MFI_STATE_MASK ;
- curr_abs_state =
- instance->instancet->read_fw_status_reg(instance->reg_set);
+ curr_abs_state = instance->instancet->
+ read_fw_status_reg(instance->reg_set);
if (abs_state == curr_abs_state) {
msleep(1);
@@ -3242,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
"in %d secs\n", fw_state, max_wait);
return -ENODEV;
}
+
+ abs_state = curr_abs_state;
+ fw_state = curr_abs_state & MFI_STATE_MASK;
}
printk(KERN_INFO "megasas: FW now in Ready state\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index bde63f7452bd..8b88118e20e6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
list_for_each_entry_safe(chain_req, next,
&ioc->scsi_lookup[i].chain_list, tracker_list) {
list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
+ list_add(&chain_req->tracker_list,
&ioc->free_chain_list);
}
}
ioc->scsi_lookup[i].cb_idx = 0xFF;
ioc->scsi_lookup[i].scmd = NULL;
ioc->scsi_lookup[i].direct_io = 0;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ list_add(&ioc->scsi_lookup[i].tracker_list,
&ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ list_add(&ioc->hpr_lookup[i].tracker_list,
&ioc->hpr_free_list);
} else if (smid <= ioc->hba_queue_depth) {
/* internal queue */
i = smid - ioc->internal_smid;
ioc->internal_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ list_add(&ioc->internal_lookup[i].tracker_list,
&ioc->internal_free_list);
}
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
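
Switching these list_add_tail() calls to list_add() makes the mpt2sas free lists LIFO: the most recently freed tracker is handed out first, and its cachelines tend to still be hot. A minimal push-front free list in plain C (hypothetical tracker type, not the driver's):

#include <stdio.h>

struct tracker { int smid; struct tracker *next; };

static struct tracker *free_list;

static void free_smid(struct tracker *t)
{
    t->next = free_list;    /* push front: LIFO, like list_add() */
    free_list = t;
}

static struct tracker *get_smid(void)
{
    struct tracker *t = free_list;  /* most recently freed, likely hot */
    if (t)
        free_list = t->next;
    return t;
}

int main(void)
{
    struct tracker a = { .smid = 1 }, b = { .smid = 2 };
    free_smid(&a);
    free_smid(&b);
    printf("reused smid %d first\n", get_smid()->smid); /* prints 2 */
    return 0;
}
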
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 1f2ac3a28621..fd3b998c75b1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+ ulong timeout, enum mutex_type m_type);
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index b7f887c9b0bf..62df8f9d4271 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
mpt2sas_scsih_issue_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
- 0, TM_MUTEX_ON);
+ TM_MUTEX_ON);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
} else
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 6fd7d40b2c4d..5055f925d2cd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
- * @serial_number: the serial_number from scmd
* @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
* Context: user
*
@@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
int
mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
- unsigned long serial_number, enum mutex_type m_type)
+ enum mutex_type m_type)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
handle = sas_device_priv_data->sas_target->handle;
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd->serial_number, TM_MUTEX_ON);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
- TM_MUTEX_ON);
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30, 0, TM_MUTEX_ON);
+ 30, TM_MUTEX_ON);
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
static int
-_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
- struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct MPT2SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
@@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
u32 mpi_control;
u16 smid;
- scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
MPT_TARGET_FLAGS_RAID_COMPONENT)
mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
else
- mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
@@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
return SCSI_MLQUEUE_HOST_BUSY;
}
-static DEF_SCSI_QCMD(_scsih_qcmd)
-
/**
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
* @sense_buffer: sense data returned by target
@@ -5880,7 +5874,7 @@ broadcast_aen_retry:
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
TM_MUTEX_OFF);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
@@ -5922,7 +5916,7 @@ broadcast_aen_retry:
r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd->serial_number, TM_MUTEX_OFF);
+ TM_MUTEX_OFF);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 0ebf5d913c80..9b90a6fef706 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+ ulong timeout, enum mutex_type m_type);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 9b89de14a0a3..ba9cbe598a91 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpt3sas_scsih_issue_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
- 0, TM_MUTEX_ON);
+ TM_MUTEX_ON);
} else
mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a961fe11b527..18e713db1d32 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
- * @serial_number: the serial_number from scmd
* @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
* Context: user
*
@@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
- unsigned long serial_number, enum mutex_type m_type)
+ enum mutex_type m_type)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
handle = sas_device_priv_data->sas_target->handle;
r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd->serial_number, TM_MUTEX_ON);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
- TM_MUTEX_ON);
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30, 0, TM_MUTEX_ON);
+ 30, TM_MUTEX_ON);
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/**
- * _scsih_qcmd_lck - main scsi request entry point
+ * _scsih_qcmd - main scsi request entry point
* @scmd: pointer to scsi command object
* @done: function pointer to be invoked on completion
*
@@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
static int
-_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
- struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
Mpi2SCSIIORequest_t *mpi_request;
@@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
scsi_print_command(scmd);
#endif
- scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
out:
return SCSI_MLQUEUE_HOST_BUSY;
}
-static DEF_SCSI_QCMD(_scsih_qcmd)
-
/**
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
@@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
TM_MUTEX_OFF);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
@@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd->serial_number, TM_MUTEX_OFF);
+ TM_MUTEX_OFF);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 1e4479f3331a..9270d15ff1a4 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
u32 tmp;
tmp = mr32(MVS_GBL_CTL);
- tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+ tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
mw32(MVS_GBL_INT_STAT, tmp);
writel(tmp, regs + 0x0C);
writel(tmp, regs + 0x10);
@@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
tmp = mr32(MVS_GBL_CTL);
- tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+ tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
mw32(MVS_GBL_INT_STAT, tmp);
writel(tmp, regs + 0x0C);
writel(tmp, regs + 0x10);
@@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
if (!(mvi->flags & MVF_FLAG_SOC)) {
stat = mr32(MVS_GBL_INT_STAT);
- if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+ if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))
return 0;
}
return stat;
@@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
void __iomem *regs = mvi->regs;
- if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
- ((stat & IRQ_SAS_B) && mvi->id == 1)) {
+ if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
+ ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {
mw32_f(MVS_INT_STAT, CINT_DONE);
spin_lock(&mvi->lock);
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 487aa6f97412..14e197497b46 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -150,35 +150,35 @@ enum chip_register_bits {
enum pci_interrupt_cause {
/* MAIN_IRQ_CAUSE (R10200) Bits*/
- IRQ_COM_IN_I2O_IOP0 = (1 << 0),
- IRQ_COM_IN_I2O_IOP1 = (1 << 1),
- IRQ_COM_IN_I2O_IOP2 = (1 << 2),
- IRQ_COM_IN_I2O_IOP3 = (1 << 3),
- IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
- IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
- IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
- IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
- IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
- IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
- IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
- IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
- IRQ_PCIF_DRBL0 = (1 << 12),
- IRQ_PCIF_DRBL1 = (1 << 13),
- IRQ_PCIF_DRBL2 = (1 << 14),
- IRQ_PCIF_DRBL3 = (1 << 15),
- IRQ_XOR_A = (1 << 16),
- IRQ_XOR_B = (1 << 17),
- IRQ_SAS_A = (1 << 18),
- IRQ_SAS_B = (1 << 19),
- IRQ_CPU_CNTRL = (1 << 20),
- IRQ_GPIO = (1 << 21),
- IRQ_UART = (1 << 22),
- IRQ_SPI = (1 << 23),
- IRQ_I2C = (1 << 24),
- IRQ_SGPIO = (1 << 25),
- IRQ_COM_ERR = (1 << 29),
- IRQ_I2O_ERR = (1 << 30),
- IRQ_PCIE_ERR = (1 << 31),
+ MVS_IRQ_COM_IN_I2O_IOP0 = (1 << 0),
+ MVS_IRQ_COM_IN_I2O_IOP1 = (1 << 1),
+ MVS_IRQ_COM_IN_I2O_IOP2 = (1 << 2),
+ MVS_IRQ_COM_IN_I2O_IOP3 = (1 << 3),
+ MVS_IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
+ MVS_IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
+ MVS_IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
+ MVS_IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
+ MVS_IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
+ MVS_IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
+ MVS_IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
+ MVS_IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
+ MVS_IRQ_PCIF_DRBL0 = (1 << 12),
+ MVS_IRQ_PCIF_DRBL1 = (1 << 13),
+ MVS_IRQ_PCIF_DRBL2 = (1 << 14),
+ MVS_IRQ_PCIF_DRBL3 = (1 << 15),
+ MVS_IRQ_XOR_A = (1 << 16),
+ MVS_IRQ_XOR_B = (1 << 17),
+ MVS_IRQ_SAS_A = (1 << 18),
+ MVS_IRQ_SAS_B = (1 << 19),
+ MVS_IRQ_CPU_CNTRL = (1 << 20),
+ MVS_IRQ_GPIO = (1 << 21),
+ MVS_IRQ_UART = (1 << 22),
+ MVS_IRQ_SPI = (1 << 23),
+ MVS_IRQ_I2C = (1 << 24),
+ MVS_IRQ_SGPIO = (1 << 25),
+ MVS_IRQ_COM_ERR = (1 << 29),
+ MVS_IRQ_I2O_ERR = (1 << 30),
+ MVS_IRQ_PCIE_ERR = (1 << 31),
};
union reg_phy_cfg {
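
The MVS_ prefix on every pci_interrupt_cause enumerator keeps the driver's constants from colliding with identically named IRQ_* symbols elsewhere in the tree; unscoped C enumerators land in the global namespace, so a driver-specific prefix is the usual remedy. A tiny illustration:

#include <stdio.h>

/* Imagine a shared header already claims the plain name. */
#define IRQ_SAS_A 0x01

/* Prefixed enumerators cannot collide with it. */
enum mvs_irq_cause {
    MVS_IRQ_SAS_A = (1 << 18),
    MVS_IRQ_SAS_B = (1 << 19),
};

int main(void)
{
    unsigned stat = MVS_IRQ_SAS_A | MVS_IRQ_SAS_B;

    if (stat & MVS_IRQ_SAS_A)
        printf("SAS core A raised an interrupt\n");
    return 0;
}
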
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 5ff978be249d..eacee48a955c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -728,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = {
.class_mask = 0,
.driver_data = chip_9485,
},
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
+ .device = 0x9485,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x9485,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_9485,
+ },
{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index bac04c2335aa..5f4cbf0c4759 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
if (unlikely(!req))
return ERR_PTR(-ENOMEM);
+ blk_rq_set_block_pc(req);
return req;
}
}
@@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or,
}
or->request = req;
- req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_QUIET;
req->timeout = or->timeout;
@@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or,
ret = PTR_ERR(req);
goto out;
}
- req->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(req);
or->in.req = or->request->next_rq = req;
}
} else if (has_in)
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 21883a2d6324..0727ea7cc387 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
if (!req)
return DRIVER_ERROR << 24;
- req->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(req);
req->cmd_flags |= REQ_QUIET;
SRpnt->bio = NULL;
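
Both osd and osst stop poking req->cmd_type directly and call blk_rq_set_block_pc(), which marks the request as a block-PC (SCSI passthrough) request and performs the related setup in one place, so a caller can no longer do only part of the initialization. The general shape of such a consolidation, as a hedged sketch with invented fields:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for struct request; fields are made up. */
struct request {
    int  cmd_type;
    int  cmd_len;
    char cmd[16];
};

#define REQ_TYPE_BLOCK_PC 2

/* One helper owns every field a passthrough request must start with. */
static void rq_set_block_pc(struct request *rq)
{
    rq->cmd_type = REQ_TYPE_BLOCK_PC;
    rq->cmd_len = 0;
    memset(rq->cmd, 0, sizeof(rq->cmd));
}

int main(void)
{
    struct request rq;

    rq_set_block_pc(&rq);
    printf("cmd_type=%d cmd_len=%d\n", rq.cmd_type, rq.cmd_len);
    return 0;
}
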
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index 3721342835e9..aa528f53c533 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *);
#define CAN_QUEUE 32
#endif
-#ifndef HOSTS_C
-
#define NCR5380_implementation_fields \
volatile unsigned short io_port
@@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *);
#define PAS16_IRQS 0xd4a8
-#endif /* else def HOSTS_C */
#endif /* ndef ASM */
#endif /* PAS16_H */
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 28b4e8139153..a368d77b8d41 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -395,6 +395,8 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
payload.offset = 0;
payload.length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ if (!payload.func_specific)
+ return -ENOMEM;
PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
@@ -402,6 +404,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
bios_index++)
str += sprintf(str, "%c",
*((u8 *)((u8 *)virt_addr+bios_index)));
+ kfree(payload.func_specific);
return str - buf;
}
static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
@@ -729,7 +732,7 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
flash_error_table[i].reason);
}
-static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c4f31b21feb8..e90c89f1d480 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
* pm8001_get_phy_settings_info : Read phy setting values.
* @pm8001_ha : our hba.
*/
-void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_READ_VPD
@@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
payload.offset = 0;
payload.length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ if (!payload.func_specific)
+ return -ENOMEM;
/* Read phy setting values from flash */
PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+ kfree(payload.func_specific);
#endif
+ return 0;
}
#ifdef PM8001_USE_MSIX
@@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm8001_init_sas_add(pm8001_ha);
/* phy setting support for motherboard controller */
if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
- pdev->subsystem_vendor != 0)
- pm8001_get_phy_settings_info(pm8001_ha);
+ pdev->subsystem_vendor != 0) {
+ rc = pm8001_get_phy_settings_info(pm8001_ha);
+ if (rc)
+ goto err_out_shost;
+ }
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc)
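
Both pm8001 hunks add the missing NULL check on the 4 KiB kzalloc for payload.func_specific and free the buffer once the firmware response has been consumed; pm8001_get_phy_settings_info also becomes static, returns -ENOMEM on failure, and the probe path unwinds through err_out_shost. The canonical allocate/check/use/free shape, in plain C with a stand-in for the firmware call:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int read_nvm_page(char *dst, size_t len)
{
    memset(dst, 0xab, len);   /* stand-in for the firmware completing */
    return 0;
}

static int get_phy_settings(void)
{
    int rc;
    char *buf = calloc(1, 4096);  /* kzalloc(4096, GFP_KERNEL) analogue */

    if (!buf)
        return -ENOMEM;           /* caller unwinds; probe fails cleanly */

    rc = read_nvm_page(buf, 4096);
    free(buf);                    /* freed on every path once consumed */
    return rc;
}

int main(void)
{
    return get_phy_settings() ? 1 : 0;
}
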
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 07befcf365b8..16fe5196e6d9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -664,7 +664,7 @@ do_read:
}
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
- addr, offset, SFP_BLOCK_SIZE, 0);
+ addr, offset, SFP_BLOCK_SIZE, BIT_1);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x706d,
"Unable to read SFP data (%x/%x/%x).\n", rval,
@@ -1495,7 +1495,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
if (!ha->fw_dumped)
size = 0;
- else if (IS_QLA82XX(ha))
+ else if (IS_P3P_TYPE(ha))
size = ha->md_template_size + ha->md_dump_size;
else
size = ha->fw_dump_len;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 71ff340f6de4..524f9eb7fcd1 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -2054,9 +2054,49 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
- ql_log(ql_log_warn, vha, 0x708c,
+ ql_dbg(ql_dbg_user, vha, 0x708c,
"Unknown serdes cmd %x.\n", sr.cmd);
- rval = -EDOM;
+ rval = -EINVAL;
+ break;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ rval ? EXT_STATUS_MAILBOX : 0;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval = 0;
+ struct qla_serdes_reg_ex sr;
+
+ memset(&sr, 0, sizeof(sr));
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+ switch (sr.cmd) {
+ case INT_SC_SERDES_WRITE_REG:
+ rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ break;
+ case INT_SC_SERDES_READ_REG:
+ rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x70cf,
+ "Unknown serdes cmd %x.\n", sr.cmd);
+ rval = -EINVAL;
break;
}
@@ -2121,6 +2161,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_SERDES_OP:
return qla26xx_serdes_op(bsg_job);
+ case QL_VND_SERDES_OP_EX:
+ return qla8044_serdes_op(bsg_job);
+
default:
return -ENOSYS;
}
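
The new qla8044_serdes_op mirrors the existing 26xx handler: copy the fixed-size qla_serdes_reg_ex out of the BSG request, switch on sr.cmd, and on a read copy the updated struct back; unknown subcommands now return -EINVAL (previously -EDOM) and log at debug rather than warning level. A compact sketch of that copy-in/dispatch/copy-out flow, with an invented register file in place of the mailbox path:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SERDES_WRITE_REG 1
#define SERDES_READ_REG  2

struct serdes_reg_ex { uint16_t cmd; uint32_t addr; uint32_t val; };

static uint32_t regfile[16];            /* pretend serdes register space */

static int serdes_op(const struct serdes_reg_ex *req,
                     struct serdes_reg_ex *rsp)
{
    struct serdes_reg_ex sr;

    memcpy(&sr, req, sizeof(sr));       /* copy-in, as from the sg list */
    switch (sr.cmd) {
    case SERDES_WRITE_REG:
        regfile[sr.addr & 15] = sr.val;
        return 0;
    case SERDES_READ_REG:
        sr.val = regfile[sr.addr & 15];
        memcpy(rsp, &sr, sizeof(sr));   /* copy-out the updated struct */
        return 0;
    default:
        return -EINVAL;                 /* unknown subcommand */
    }
}

int main(void)
{
    struct serdes_reg_ex w = { SERDES_WRITE_REG, 3, 0xdead };
    struct serdes_reg_ex r = { SERDES_READ_REG, 3, 0 };
    struct serdes_reg_ex out;

    serdes_op(&w, NULL);
    serdes_op(&r, &out);
    printf("reg[3] = 0x%x\n", (unsigned)out.val);
    return 0;
}
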
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index e5c2126221e9..d38f9efa56fa 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -24,6 +24,7 @@
#define QL_VND_READ_I2C 0x11
#define QL_VND_FX00_MGMT_CMD 0x12
#define QL_VND_SERDES_OP 0x13
+#define QL_VND_SERDES_OP_EX 0x14
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -225,4 +226,10 @@ struct qla_serdes_reg {
uint16_t val;
} __packed;
+struct qla_serdes_reg_ex {
+ uint16_t cmd;
+ uint32_t addr;
+ uint32_t val;
+} __packed;
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 97255f7c3975..c72ee97bf3f7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -15,7 +15,7 @@
* | | | 0x0144,0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
- * | Mailbox commands | 0x1187 | 0x1018-0x1019 |
+ * | Mailbox commands | 0x118d | 0x1018-0x1019 |
* | | | 0x10ca |
* | | | 0x1115-0x1116 |
* | | | 0x111a-0x111b |
@@ -45,12 +45,16 @@
* | | | 0x70ad-0x70ae |
* | | | 0x70d7-0x70db |
* | | | 0x70de-0x70df |
- * | Task Management | 0x803d | 0x8025-0x8026 |
- * | | | 0x800b,0x8039 |
+ * | Task Management | 0x803d | 0x8000,0x800b |
+ * | | | 0x8019 |
+ * | | | 0x8025,0x8026 |
+ * | | | 0x8031,0x8032 |
+ * | | | 0x8039,0x803c |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
* | | | 0xb09e,0xb0ae |
+ * | | | 0xb0c3,0xb0c6 |
* | | | 0xb0e0-0xb0ef |
* | | | 0xb085,0xb0dc |
* | | | 0xb107,0xb108 |
@@ -60,12 +64,12 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd2ff | 0xd017-0xd019 |
+ * | Misc | 0xd212 | 0xd017-0xd019 |
* | | | 0xd020 |
- * | | | 0xd02e-0xd0ff |
+ * | | | 0xd030-0xd0ff |
* | | | 0xd101-0xd1fe |
- * | | | 0xd212-0xd2fe |
- * | Target Mode | 0xe070 | 0xe021 |
+ * | | | 0xd213-0xd2fe |
+ * | Target Mode | 0xe078 | |
* | Target Mode Management | 0xf072 | 0xf002-0xf003 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000b | |
@@ -277,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
if (rval != QLA_SUCCESS)
return rval;
+ set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
/* External Memory. */
- return qla24xx_dump_ram(ha, 0x100000, *nxt,
+ rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
ha->fw_memory_size - 0x100000 + 1, nxt);
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+ return rval;
}
static uint32_t *
@@ -296,23 +306,15 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
return buf;
}
-int
-qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
+void
+qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
- int rval = QLA_SUCCESS;
- uint32_t cnt;
-
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
- for (cnt = 30000;
- ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
- rval == QLA_SUCCESS; cnt--) {
- if (cnt)
- udelay(100);
- else
- rval = QLA_FUNCTION_TIMEOUT;
- }
- return rval;
+ /* A 100 usec delay is sufficient for the hardware to pause the RISC */
+ udelay(100);
+ if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+ set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}
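
qla24xx_pause_risc no longer polls for HSRX_RISC_PAUSED in a 30000-iteration timeout loop: it writes the pause request, waits a fixed 100 microseconds, and records the observed outcome as RISC_PAUSE_CMPL in fw_dump_cap_flags. The dump proceeds either way, since (per the callers' comments below) a chip reset is the right recovery if the pause did not take. A userspace sketch of that act/fixed-delay/record-outcome shape, with an invented register:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define RISC_PAUSE_CMPL (1UL << 0)

static uint32_t host_status;     /* stand-in for the mapped register */

static void pause_risc(unsigned long *cap_flags)
{
    host_status |= 0x100;        /* pretend the pause write took effect */
    usleep(100);                 /* fixed settle time, no poll loop */
    if (host_status & 0x100)
        *cap_flags |= RISC_PAUSE_CMPL;  /* record it; never bail out */
}

int main(void)
{
    unsigned long cap_flags = 0;

    pause_risc(&cap_flags);
    printf("dump status flags 0x%lx\n", cap_flags);
    return 0;
}
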
int
@@ -320,10 +322,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
{
int rval = QLA_SUCCESS;
uint32_t cnt;
- uint16_t mb0, wd;
+ uint16_t wd;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- /* Reset RISC. */
+ /*
+ * Reset RISC. The delay is dependent on system architecture.
+ * Driver can proceed with the reset sequence after waiting
+ * for a timeout period.
+ */
WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
@@ -331,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
udelay(10);
}
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
WRT_REG_DWORD(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
- /* Wait for firmware to complete NVRAM accesses. */
- mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000 ; cnt && mb0; cnt--) {
- udelay(5);
- mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- barrier();
- }
/* Wait for soft-reset to complete. */
for (cnt = 0; cnt < 30000; cnt++) {
@@ -353,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
udelay(10);
}
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
+
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
- for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
- udelay(100);
+ udelay(10);
else
rval = QLA_FUNCTION_TIMEOUT;
}
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
return rval;
}
@@ -659,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xd000,
- "Failed to dump firmware (%x).\n", rval);
+ "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
+ rval, ha->fw_dump_cap_flags);
ha->fw_dumped = 0;
} else {
ql_log(ql_log_info, vha, 0xd001,
- "Firmware dump saved to temp buffer (%ld/%p).\n",
- vha->host_no, ha->fw_dump);
+ "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
+ vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
ha->fw_dumped = 1;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
@@ -1053,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
risc_address = ext_mem_cnt = 0;
flags = 0;
+ ha->fw_dump_cap_flags = 0;
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1075,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
- /* Pause RISC. */
- rval = qla24xx_pause_risc(reg);
- if (rval != QLA_SUCCESS)
- goto qla24xx_fw_dump_failed_0;
+ /*
+ * Pause RISC. No need to track a timeout, as resetting the chip
+ * is the right approach in case the pause times out.
+ */
+ qla24xx_pause_risc(reg, ha);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
@@ -1302,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
risc_address = ext_mem_cnt = 0;
flags = 0;
+ ha->fw_dump_cap_flags = 0;
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1325,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
- /* Pause RISC. */
- rval = qla24xx_pause_risc(reg);
- if (rval != QLA_SUCCESS)
- goto qla25xx_fw_dump_failed_0;
+ /*
+ * Pause RISC. No need to track a timeout, as resetting the chip
+ * is the right approach in case the pause times out.
+ */
+ qla24xx_pause_risc(reg, ha);
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
@@ -1619,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
risc_address = ext_mem_cnt = 0;
flags = 0;
+ ha->fw_dump_cap_flags = 0;
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1641,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
- /* Pause RISC. */
- rval = qla24xx_pause_risc(reg);
- if (rval != QLA_SUCCESS)
- goto qla81xx_fw_dump_failed_0;
+ /*
+ * Pause RISC. No need to track a timeout, as resetting the chip
+ * is the right approach in case the pause times out.
+ */
+ qla24xx_pause_risc(reg, ha);
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
@@ -1938,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
risc_address = ext_mem_cnt = 0;
flags = 0;
+ ha->fw_dump_cap_flags = 0;
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1959,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
- /* Pause RISC. */
- rval = qla24xx_pause_risc(reg);
- if (rval != QLA_SUCCESS)
- goto qla83xx_fw_dump_failed_0;
+ /*
+ * Pause RISC. No need to track a timeout, as resetting the chip
+ * is the right approach in case the pause times out.
+ */
+ qla24xx_pause_risc(reg, ha);
WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
@@ -2385,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt += sizeof(fw->code_ram);
nxt += (ha->fw_memory_size - 0x100000 + 1);
goto copy_queue;
- } else
+ } else {
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
ql_log(ql_log_warn, vha, 0xd010,
"bigger hammer success?\n");
+ }
}
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index cc961040f8b1..e1fc4e66966a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -353,5 +353,6 @@ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
-extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
+ struct qla_hw_data *);
extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6a106136716c..de5d0ae19d83 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -965,6 +965,13 @@ struct mbx_cmd_32 {
*/
#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */
+/*
+ * ISP8044 mailbox commands
+ */
+#define MBC_SET_GET_ETH_SERDES_REG 0x150
+#define HCS_WRITE_SERDES 0x3
+#define HCS_READ_SERDES 0x4
+
/* Firmware return data sizes */
#define FCAL_MAP_SIZE 128
@@ -1622,25 +1629,35 @@ typedef struct {
#define PO_MODE_DIF_PASS 2
#define PO_MODE_DIF_REPLACE 3
#define PO_MODE_DIF_TCP_CKSUM 6
-#define PO_ENABLE_DIF_BUNDLING BIT_8
#define PO_ENABLE_INCR_GUARD_SEED BIT_3
-#define PO_DISABLE_INCR_REF_TAG BIT_5
#define PO_DISABLE_GUARD_CHECK BIT_4
+#define PO_DISABLE_INCR_REF_TAG BIT_5
+#define PO_DIS_HEADER_MODE BIT_7
+#define PO_ENABLE_DIF_BUNDLING BIT_8
+#define PO_DIS_FRAME_MODE BIT_9
+#define PO_DIS_VALD_APP_ESC BIT_10 /* Disable validation for escape tag/ffffh */
+#define PO_DIS_VALD_APP_REF_ESC BIT_11
+
+#define PO_DIS_APP_TAG_REPL BIT_12 /* disable APP Tag replacement */
+#define PO_DIS_REF_TAG_REPL BIT_13
+#define PO_DIS_APP_TAG_VALD BIT_14 /* disable APP Tag validation */
+#define PO_DIS_REF_TAG_VALD BIT_15
+
/*
* ISP queue - 64-Bit addressing, continuation crc entry structure definition.
*/
struct crc_context {
uint32_t handle; /* System handle. */
- uint32_t ref_tag;
- uint16_t app_tag;
+ __le32 ref_tag;
+ __le16 app_tag;
uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
- uint16_t guard_seed; /* Initial Guard Seed */
- uint16_t prot_opts; /* Requested Data Protection Mode */
- uint16_t blk_size; /* Data size in bytes */
+ __le16 guard_seed; /* Initial Guard Seed */
+ __le16 prot_opts; /* Requested Data Protection Mode */
+ __le16 blk_size; /* Data size in bytes */
uint16_t runt_blk_guard; /* Guard value for runt block (tape
* only) */
- uint32_t byte_count; /* Total byte count/ total data
+ __le32 byte_count; /* Total byte count/ total data
* transfer count */
union {
struct {
@@ -1654,10 +1671,10 @@ struct crc_context {
uint32_t reserved_6;
} nobundling;
struct {
- uint32_t dif_byte_count; /* Total DIF byte
+ __le32 dif_byte_count; /* Total DIF byte
* count */
uint16_t reserved_1;
- uint16_t dseg_count; /* Data segment count */
+ __le16 dseg_count; /* Data segment count */
uint32_t reserved_2;
uint32_t data_address[2];
uint32_t data_length;
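
The crc_context fields that travel to the ISP in host memory are retyped from uint32_t/uint16_t to __le32/__le16, so tools like sparse can flag any access that skips the cpu_to_le*/le*_to_cpu conversion. In portable userspace C the same discipline looks like explicit htole32()/le32toh() calls at the boundary (endian.h, as provided by glibc):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Wire-format struct: every multi-byte field is stored little-endian,
 * mirroring the __le32/__le16 annotations in the driver header. */
struct crc_ctx_wire {
    uint32_t ref_tag;     /* held in LE byte order on every host */
    uint16_t app_tag;     /* held in LE byte order on every host */
};

int main(void)
{
    struct crc_ctx_wire ctx;

    ctx.ref_tag = htole32(0x11223344u);  /* convert at the boundary */
    ctx.app_tag = htole16(0xbeef);

    printf("ref_tag back in cpu order: 0x%x\n",
           (unsigned)le32toh(ctx.ref_tag));
    return 0;
}
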
@@ -1748,6 +1765,8 @@ typedef struct {
#define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */
#define CS_PORT_BUSY 0x2B /* Port Busy */
#define CS_COMPLETE_CHKCOND 0x30 /* Error? */
+#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
+ failure */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2676,6 +2695,7 @@ struct rsp_que {
uint32_t __iomem *rsp_q_out;
uint16_t ring_index;
uint16_t out_ptr;
+ uint16_t *in_ptr; /* queue shadow in index */
uint16_t length;
uint16_t options;
uint16_t rid;
@@ -2702,6 +2722,7 @@ struct req_que {
uint32_t __iomem *req_q_out;
uint16_t ring_index;
uint16_t in_ptr;
+ uint16_t *out_ptr; /* queue shadow out index */
uint16_t cnt;
uint16_t length;
uint16_t options;
@@ -2907,6 +2928,8 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
+#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
+
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@@ -2928,7 +2951,8 @@ struct qla_hw_data {
#define DT_ISPFX00 BIT_17
#define DT_ISP8044 BIT_18
#define DT_ISP2071 BIT_19
-#define DT_ISP_LAST (DT_ISP2071 << 1)
+#define DT_ISP2271 BIT_20
+#define DT_ISP_LAST (DT_ISP2271 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2959,6 +2983,7 @@ struct qla_hw_data {
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
+#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2967,7 +2992,7 @@ struct qla_hw_data {
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
-#define IS_QLA27XX(ha) (IS_QLA2071(ha))
+#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3006,6 +3031,7 @@ struct qla_hw_data {
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
+#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3136,7 +3162,15 @@ struct qla_hw_data {
struct qla2xxx_fw_dump *fw_dump;
uint32_t fw_dump_len;
int fw_dumped;
+ unsigned long fw_dump_cap_flags;
+#define RISC_PAUSE_CMPL 0
+#define DMA_SHUTDOWN_CMPL 1
+#define ISP_RESET_CMPL 2
+#define RISC_RDY_AFT_RESET 3
+#define RISC_SRAM_DUMP_CMPL 4
+#define RISC_EXT_MEM_DUMP_CMPL 5
int fw_dump_reading;
+ int prev_minidump_failed;
dma_addr_t eft_dma;
void *eft;
/* Current size of mctp dump is 0x086064 bytes */
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 32ab80957688..2ca39b8e7166 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 3a7353eaccbd..eb8f57249f1d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -371,7 +371,10 @@ struct init_cb_24xx {
* BIT 14 = Data Rate bit 1
* BIT 15 = Data Rate bit 2
* BIT 16 = Enable 75 ohm Termination Select
- * BIT 17-31 = Reserved
+ * BIT 17-28 = Reserved
+ * BIT 29 = Enable response queue 0 in index shadowing
+ * BIT 30 = Enable request queue 0 out index shadowing
+ * BIT 31 = Reserved
*/
uint32_t firmware_options_3;
uint16_t qos;
@@ -1134,13 +1137,6 @@ struct device_reg_24xx {
#define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */
#define MAX_MULTI_ID_FABRIC 256 /* ... */
-#define for_each_mapped_vp_idx(_ha, _idx) \
- for (_idx = find_next_bit((_ha)->vp_idx_map, \
- (_ha)->max_npiv_vports + 1, 1); \
- _idx <= (_ha)->max_npiv_vports; \
- _idx = find_next_bit((_ha)->vp_idx_map, \
- (_ha)->max_npiv_vports + 1, _idx + 1)) \
-
struct mid_conf_entry_24xx {
uint16_t reserved_1;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e665e8109933..d48dea8fab1b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
+extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
+
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -347,6 +354,11 @@ extern int
qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
extern int
+qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int
+qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
extern int
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e377f9d2f92a..a0df3b1b3823 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 38aeb54cd9d8..e2184412617d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1476,6 +1476,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
}
ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -2061,6 +2062,10 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_30|BIT_29);
+
if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
@@ -2138,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
+ req->out_ptr = (void *)(req->ring + req->length);
+ *req->out_ptr = 0;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
@@ -2153,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rsp = ha->rsp_q_map[que];
if (!rsp)
continue;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ *rsp->in_ptr = 0;
/* Initialize response queue entries */
if (IS_QLAFX00(ha))
qlafx00_init_response_q_entries(rsp);
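
With BIT_30|BIT_29 set in firmware_options_2 (the "index shadowing" bits documented in the qla_fw.h hunk above), ISP27xx firmware writes the queue indexes into host memory; the driver points req->out_ptr and rsp->in_ptr at the first slot past the ring entries (ring + length) and zeroes them here, so the hot path can read the index from memory instead of an MMIO register. The pointer arithmetic, in a sketch with invented types that allocates one extra entry's worth of space for the shadow:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { uint8_t bytes[64]; };      /* stand-in for an IOCB slot */

int main(void)
{
    uint16_t length = 8;                  /* ring entries */
    /* One extra entry's worth of space holds the shadow index. */
    struct entry *ring = calloc(length + 1, sizeof(*ring));
    uint16_t *out_ptr;

    if (!ring)
        return 1;

    out_ptr = (uint16_t *)(ring + length); /* first byte past the ring */
    *out_ptr = 0;                          /* firmware would update this */

    printf("shadow out index lives at offset %zu\n",
           (size_t)((char *)out_ptr - (char *)ring));
    free(ring);
    return 0;
}
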
@@ -3406,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla2x00_clear_loop_id(fcport);
}
}
}
@@ -4727,7 +4736,6 @@ static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
int status = 0;
- uint32_t wait_time;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
@@ -4744,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
if (!status && !(status = qla2x00_init_rings(vha))) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
ha->flags.chip_reset_done = 1;
+
/* Initialize the queues in use */
qla25xx_init_queues(ha);
status = qla2x00_fw_ready(vha);
if (!status) {
- ql_dbg(ql_dbg_taskm, vha, 0x8031,
- "Start configure loop status = %d.\n", status);
-
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4766,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
qlt_24xx_process_atio_queue(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- /* Wait at most MAX_TARGET RSCNs for a stable link. */
- wait_time = 256;
- do {
- clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2x00_configure_loop(vha);
- wait_time--;
- } while (!atomic_read(&vha->loop_down_timer) &&
- !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
- && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
- &vha->dpc_flags)));
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
/* if no cable then assume it's good */
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
-
- ql_dbg(ql_dbg_taskm, vha, 0x8032,
- "Configure loop done, status = 0x%x.\n", status);
}
return (status);
}
@@ -6130,7 +6124,6 @@ int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
int status, rval;
- uint32_t wait_time;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
@@ -6144,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
- ql_log(ql_log_info, vha, 0x803c,
- "Start configure loop, status =%d.\n", status);
-
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
-
vha->flags.online = 1;
- /* Wait at most MAX_TARGET RSCNs for a stable link. */
- wait_time = 256;
- do {
- clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2x00_configure_loop(vha);
- wait_time--;
- } while (!atomic_read(&vha->loop_down_timer) &&
- !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
- wait_time &&
- (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
/* if no cable then assume it's good */
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
-
- ql_log(ql_log_info, vha, 0x8000,
- "Configure loop done, status = 0x%x.\n", status);
}
if (!status) {
@@ -6182,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
vha->marker_needed = 1;
}
- vha->flags.online = 1;
-
ha->isp_ops->enable_intrs(ha);
ha->isp_abort_cnt = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index ce8b5fb0f347..b3b1d6fc2d6c 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,10 +1,11 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
+#include "qla_target.h"
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
@@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
}
static inline void
-qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
+ struct qla_tgt_cmd *tc)
{
struct dsd_dma *dsd_ptr, *tdsd_ptr;
struct crc_context *ctx;
- ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
+ if (sp)
+ ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
+ else if (tc)
+ ctx = (struct crc_context *)tc->ctx;
+ else {
+ BUG();
+ return;
+ }
/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
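
The reworked qla2x00_clean_dsd_pool() now serves both the initiator path (an srb_t) and the target path (a struct qla_tgt_cmd): exactly one of sp/tc must be non-NULL, and the CRC context is taken from whichever was passed. A compact model of that either/or contract, with stand-in types that are not the driver's:

#include <assert.h>
#include <stdio.h>

struct ctx { int id; };
struct srb { struct ctx *ctx; };       /* stands in for srb_t */
struct tgt_cmd { struct ctx *ctx; };   /* stands in for qla_tgt_cmd */

/* exactly one of sp/tc may be non-NULL, as in the driver helper */
static struct ctx *resolve_ctx(struct srb *sp, struct tgt_cmd *tc)
{
	if (sp)
		return sp->ctx;
	if (tc)
		return tc->ctx;
	assert(0 && "caller must pass sp or tc");   /* BUG() analogue */
	return NULL;
}

int main(void)
{
	struct ctx c = { .id = 7 };
	struct tgt_cmd tc = { .ctx = &c };

	printf("ctx id via target path: %d\n", resolve_ctx(NULL, &tc)->id);
	return 0;
}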
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e607568bce49..760931529592 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
return 1;
}
-static int
+int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
- uint32_t prot_int;
+ uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
- struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-
- prot_int = cmd->device->sector_size;
+ struct scsi_cmnd *cmd;
+ struct scsi_qla_host *vha;
memset(&sgx, 0, sizeof(struct qla2_sgx));
- sgx.tot_bytes = scsi_bufflen(cmd);
- sgx.cur_sg = scsi_sglist(cmd);
- sgx.sp = sp;
-
- sg_prot = scsi_prot_sglist(cmd);
+ if (sp) {
+ vha = sp->fcport->vha;
+ cmd = GET_CMD_SP(sp);
+ prot_int = cmd->device->sector_size;
+
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ sg_prot = scsi_prot_sglist(cmd);
+ } else if (tc) {
+ vha = tc->vha;
+ prot_int = tc->blk_sz;
+ sgx.tot_bytes = tc->bufflen;
+ sgx.cur_sg = tc->sg;
+ sg_prot = tc->prot_sg;
+ } else {
+ BUG();
+ return 1;
+ }
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
@@ -995,10 +1009,18 @@ alloc_and_fill:
return 1;
}
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1033,21 +1055,35 @@ alloc_and_fill:
return 0;
}
-static int
+int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds)
+ uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
- struct scatterlist *sg;
+ struct scatterlist *sg, *sgl;
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
- struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_cmnd *cmd;
+ struct scsi_qla_host *vha;
+
+ if (sp) {
+ cmd = GET_CMD_SP(sp);
+ sgl = scsi_sglist(cmd);
+ vha = sp->fcport->vha;
+ } else if (tc) {
+ sgl = tc->sg;
+ vha = tc->vha;
+ } else {
+ BUG();
+ return 1;
+ }
- scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
return 1;
}
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
return 0;
}
-static int
+int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd,
- uint16_t tot_dsds)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
- struct scatterlist *sg;
+ struct scatterlist *sg, *sgl;
int i;
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
- uint16_t used_dsds = tot_dsds;
+ uint16_t used_dsds = tot_dsds;
+ struct scsi_qla_host *vha;
+
+ if (sp) {
+ cmd = GET_CMD_SP(sp);
+ sgl = scsi_prot_sglist(cmd);
+ vha = sp->fcport->vha;
+ } else if (tc) {
+ vha = tc->vha;
+ sgl = tc->prot_sg;
+ } else {
+ BUG();
+ return 1;
+ }
- cmd = GET_CMD_SP(sp);
- scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe021,
+ "%s: enter\n", __func__);
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
return 1;
}
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (!bundling && tot_prot_dsds) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
- cur_dsd, tot_dsds))
+ cur_dsd, tot_dsds, NULL))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
- (tot_dsds - tot_prot_dsds)))
+ (tot_dsds - tot_prot_dsds), NULL))
goto crc_queuing_error;
if (bundling && tot_prot_dsds) {
@@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
- tot_prot_dsds))
+ tot_prot_dsds, NULL))
goto crc_queuing_error;
}
return QLA_SUCCESS;
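
A pattern note on the sglist hunks above: replacing scsi_for_each_sg(cmd, ...) with for_each_sg(sgl, ...) decouples the walk from struct scsi_cmnd, so one loop body can serve a scatterlist resolved from either the SCSI command or the target command. Modeled in plain C, with an array and a macro standing in for the kernel scatterlist API:

#include <stdio.h>

struct sg_entry { unsigned long dma; unsigned int len; };

/* crude stand-in for for_each_sg(sgl, sg, nents, i) */
#define for_each_entry(sgl, sg, nents, i) \
	for ((i) = 0, (sg) = (sgl); (i) < (nents); (i)++, (sg)++)

int main(void)
{
	struct sg_entry init_sgl[2] = { {0x1000, 512}, {0x2000, 512} };
	struct sg_entry tgt_sgl[1]  = { {0x9000, 4096} };
	int use_target = 1, i, nents;
	struct sg_entry *sgl, *sg;

	/* resolve the list once, then one shared walk -- the shape the
	 * qla24xx_walk_and_build_* helpers now follow */
	sgl = use_target ? tgt_sgl : init_sgl;
	nents = use_target ? 1 : 2;
	for_each_entry(sgl, sg, nents, i)
		printf("dsd %d: dma=%#lx len=%u\n", i, sg->dma, sg->len);
	return 0;
}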
@@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_prot_dsds = nseg;
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 95314ef2e505..a56825c73c31 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -2009,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
- if (IS_P3P_TYPE(ha))
- set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
- else
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return;
}
@@ -2472,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0) {
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
- (void)qlt_24xx_process_response_error(vha, pkt);
+ if (qlt_24xx_process_response_error(vha, pkt))
+ goto process_err;
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
}
+process_err:
switch (pkt->entry_type) {
case STATUS_TYPE:
@@ -2494,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
- case CT_IOCB_TYPE:
+ case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
break;
- case ELS_IOCB_TYPE:
+ case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
@@ -2506,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
+ case CTIO_CRC2:
qlt_response_pkt_all_vps(vha, (response_t *)pkt);
break;
case MARKER_TYPE:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2528709c4add..1c33a77db5c2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1319,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
left = 0;
- list = kzalloc(dma_size, GFP_KERNEL);
+ list = kmemdup(pmap, dma_size, GFP_KERNEL);
if (!list) {
ql_log(ql_log_warn, vha, 0x1140,
"%s(%ld): failed to allocate node names list "
@@ -1328,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
goto out_free;
}
- memcpy(list, pmap, dma_size);
restart:
dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
}
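
The qla_mbx.c hunk above folds a kzalloc()+memcpy() pair into kmemdup(), which allocates and copies in one step. A user-space sketch of the same idiom (memdup here is a hypothetical helper, not a libc function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* user-space analogue of the kernel's kmemdup() */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned int pmap[4] = { 1, 2, 3, 4 };
	unsigned int *list = memdup(pmap, sizeof(pmap));

	if (!list)
		return 1;
	printf("copied: %u %u %u %u\n", list[0], list[1], list[2], list[3]);
	free(list);
	return 0;
}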
@@ -2644,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle));
- rval = QLA_FUNCTION_FAILED;
+ if (abt->nport_handle == CS_IOCB_ERROR)
+ rval = QLA_FUNCTION_PARAMETER_ERROR;
+ else
+ rval = QLA_FUNCTION_FAILED;
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
"Done %s.\n", __func__);
@@ -2879,6 +2881,78 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
return rval;
}
+int
+qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA8044(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+ mcp->mb[1] = HCS_WRITE_SERDES;
+ mcp->mb[3] = LSW(addr);
+ mcp->mb[4] = MSW(addr);
+ mcp->mb[5] = LSW(data);
+ mcp->mb[6] = MSW(data);
+ mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1187,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA8044(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+ mcp->mb[1] = HCS_READ_SERDES;
+ mcp->mb[3] = LSW(addr);
+ mcp->mb[4] = MSW(addr);
+ mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ *data = mcp->mb[2] << 16 | mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
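
Together the two new mailbox helpers permit a read-modify-write of an ISP8044 SerDes register. The sequence below models that flow against a fake register file; the address, field and mask values are made up purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];   /* fake SerDes register file */

static int read_serdes_word(uint32_t addr, uint32_t *data)
{
	*data = regs[addr & 0xf];
	return 0;   /* QLA_SUCCESS analogue */
}

static int write_serdes_word(uint32_t addr, uint32_t data)
{
	regs[addr & 0xf] = data;
	return 0;
}

int main(void)
{
	uint32_t val;

	regs[3] = 0x00f0ffff;
	/* read-modify-write: clear one field, then set a new value */
	if (read_serdes_word(3, &val))
		return 1;
	val = (val & ~0x00ff0000u) | 0x00120000u;
	if (write_serdes_word(3, val))
		return 1;
	read_serdes_word(3, &val);
	printf("reg3 = %#x\n", (unsigned)val);
	return 0;
}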
+
/**
* qla2x00_set_serdes_params() -
* @ha: HA context
@@ -3660,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
"Entered %s.\n", __func__);
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ req->options |= BIT_13;
+
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = req->options;
mcp->mb[2] = MSW(LSD(req->dma));
@@ -3679,7 +3756,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
/* que in ptr index */
mcp->mb[8] = 0;
/* que out ptr index */
- mcp->mb[9] = 0;
+ mcp->mb[9] = *req->out_ptr = 0;
mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
@@ -3688,7 +3765,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
mcp->out_mb |= MBX_15;
/* debug q create issue in SR-IOV */
mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3697,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
WRT_REG_DWORD(req->req_q_in, 0);
- if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
WRT_REG_DWORD(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3726,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
"Entered %s.\n", __func__);
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ rsp->options |= BIT_13;
+
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = rsp->options;
mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3740,7 +3820,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[4] = rsp->id;
/* que in ptr index */
- mcp->mb[8] = 0;
+ mcp->mb[8] = *rsp->in_ptr = 0;
/* que out ptr index */
mcp->mb[9] = 0;
mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f0a852257f99..89998244f48d 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 0aaf6a9c87d3..abeb3901498b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
int i, core;
uint32_t cnt;
+ uint32_t reg_val;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
+
+ /* stop the XOR DMA engines */
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
+
+ /* stop the IDMA engines */
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
+
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
+
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
+
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
+
+ for (i = 0; i < 100000; i++) {
+ if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
+ (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
+ break;
+ udelay(100);
+ }
/* Set all 4 cores in reset */
for (i = 0; i < 4; i++) {
QLAFX00_SET_HBA_SOC_REG(ha,
(SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
- }
-
- /* Set all 4 core Clock gating control */
- for (i = 0; i < 4; i++) {
QLAFX00_SET_HBA_SOC_REG(ha,
(SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
}
/* Reset all units in Fabric */
- QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
+
+ /* */
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
+
+ /* Set all 4 core Memory Power Down Registers */
+ for (i = 0; i < 5; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
+ }
/* Reset all interrupt control registers */
for (i = 0; i < 115; i++) {
@@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
/* Kick in Fabric units */
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
/* Kick in Core0 to start boot process */
QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Wait 10secs for soft-reset to complete. */
for (cnt = 10; cnt; cnt--) {
msleep(1000);
barrier();
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
@@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
qlafx00_soc_cpu_reset(vha);
- ha->isp_ops->enable_intrs(ha);
}
/**
@@ -2675,7 +2715,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
uint16_t lreq_q_out = 0;
lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
- lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
+ lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) {
lptr = rsp->ring_ptr;
@@ -3426,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
sp->fcport->vha, 0x3047,
(uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
- memcpy((void *)pfxiocb, &fx_iocb,
+ memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
sizeof(struct fxdisc_entry_fx00));
wmb();
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index e529dfaeb854..aeaa1b40b1fc 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -351,6 +351,7 @@ struct config_info_data {
#define SOC_FABRIC_RST_CONTROL_REG 0x0020840
#define SOC_FABRIC_CONTROL_REG 0x0020200
#define SOC_FABRIC_CONFIG_REG 0x0020204
+#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C
#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00
#define SOC_CORE_TIMER_REG 0x0021850
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 5511e24b1f11..58f3c912d96e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
@@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
break;
if (timeout >= qla82xx_rom_lock_timeout) {
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_log(ql_log_warn, vha, 0xb157,
+ "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+ __func__, ha->portnum, lock_owner);
return -1;
}
timeout++;
}
- qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
return 0;
}
static void
qla82xx_rom_unlock(struct qla_hw_data *ha)
{
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}
@@ -950,6 +955,7 @@ static int
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
int ret, loops = 0;
+ uint32_t lock_owner = 0;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
loops++;
}
if (loops >= 50000) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
ql_log(ql_log_fatal, vha, 0x00b9,
- "Failed to acquire SEM2 lock.\n");
+ "Failed to acquire SEM2 lock, Lock Owner %u.\n",
+ lock_owner);
return -1;
}
ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -1057,6 +1065,7 @@ static int
ql82xx_rom_lock_d(struct qla_hw_data *ha)
{
int loops = 0;
+ uint32_t lock_owner = 0;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
loops++;
}
if (loops >= 50000) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
ql_log(ql_log_warn, vha, 0xb010,
- "ROM lock failed.\n");
+ "ROM lock failed, Lock Owner %u.\n", lock_owner);
return -1;
}
return 0;
@@ -2811,12 +2821,14 @@ static void
qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ uint32_t lock_owner = 0;
- if (qla82xx_rom_lock(ha))
+ if (qla82xx_rom_lock(ha)) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
/* Someone else is holding the lock. */
ql_log(ql_log_info, vha, 0xb022,
- "Resetting rom_lock.\n");
-
+ "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
+ }
/*
* Either we got the lock, or someone
* else died while holding it.
@@ -2840,47 +2852,30 @@ static int
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
- int i, timeout;
+ int i;
uint32_t old_count, count;
struct qla_hw_data *ha = vha->hw;
- int need_reset = 0, peg_stuck = 1;
+ int need_reset = 0;
need_reset = qla82xx_need_reset(ha);
- old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
-
- for (i = 0; i < 10; i++) {
- timeout = msleep_interruptible(200);
- if (timeout) {
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- return QLA_FUNCTION_FAILED;
- }
-
- count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
- if (count != old_count)
- peg_stuck = 0;
- }
-
if (need_reset) {
/* We are trying to perform a recovery here. */
- if (peg_stuck)
+ if (ha->flags.isp82xx_fw_hung)
qla82xx_rom_lock_recovery(ha);
- goto dev_initialize;
} else {
- /* Start of day for this ha context. */
- if (peg_stuck) {
- /* Either we are the first or recovery in progress. */
- qla82xx_rom_lock_recovery(ha);
- goto dev_initialize;
- } else
- /* Firmware already running. */
- goto dev_ready;
+ old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+ count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
+ }
+ qla82xx_rom_lock_recovery(ha);
}
- return rval;
-
-dev_initialize:
/* set to DEV_INITIALIZING */
ql_log(ql_log_info, vha, 0x009e,
"HW State: INITIALIZING.\n");
@@ -3142,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
if (ql2xmdenable) {
if (!ha->fw_dumped) {
- if (fw_major_version != ha->fw_major_version ||
+ if ((fw_major_version != ha->fw_major_version ||
fw_minor_version != ha->fw_minor_version ||
- fw_subminor_version != ha->fw_subminor_version) {
+ fw_subminor_version != ha->fw_subminor_version) ||
+ (ha->prev_minidump_failed)) {
ql_dbg(ql_dbg_p3p, vha, 0xb02d,
- "Firmware version differs "
- "Previous version: %d:%d:%d - "
- "New version: %d:%d:%d\n",
+ "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
fw_major_version, fw_minor_version,
fw_subminor_version,
ha->fw_major_version,
ha->fw_minor_version,
- ha->fw_subminor_version);
+ ha->fw_subminor_version,
+ ha->prev_minidump_failed);
/* Release MiniDump resources */
qla82xx_md_free(vha);
/* Allocate MiniDump resources */
@@ -3682,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if (!sp->u.scmd.ctx ||
- (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
+ if ((!sp->u.scmd.ctx ||
+ (sp->flags &
+ SRB_FCP_CMND_DMA_VALID)) &&
+ !ha->flags.isp82xx_fw_hung) {
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 1bb93dbbccbb..59c477883a73 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -333,9 +333,6 @@
#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER 0x0d417340
-
#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
#define QLA82XX_PCI_CRB_WINDOW(A) \
(QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
@@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_val1(x) ((x) & 0x0000FFFF)
#define qla82xx_get_temp_state(x) ((x) & 0xffff)
#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 86cf10815db0..da9e3902f219 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1,17 +1,20 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include <linux/vmalloc.h>
+#include <linux/delay.h>
#include "qla_def.h"
#include "qla_gbl.h"
#include <linux/delay.h>
+#define TIMEOUT_100_MS 100
+
/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
@@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
qla8044_wr_reg_indirect(vha, waddr, value);
}
+static int
+qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
+ uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t temp;
+
+ /* jiffies after 100ms */
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ if (time_after_eq(jiffies, timeout)) {
+ ql_log(ql_log_warn, vha, 0xb151,
+ "Error in processing rdmdio entry\n");
+ return -1;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static uint32_t
+qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
+ uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
+{
+ uint32_t temp;
+ int ret = 0;
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return -1;
+
+ temp = (0x40000000 | addr);
+ qla8044_wr_reg_indirect(vha, addr1, temp);
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return 0;
+
+ qla8044_rd_reg_indirect(vha, addr3, &ret);
+
+ return ret;
+}
+
+
+static int
+qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
+ uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t temp;
+
+ /* jiffies after 100 msecs */
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
+ if ((temp & 0x1) != 1)
+ break;
+ if (time_after_eq(jiffies, timeout)) {
+ ql_log(ql_log_warn, vha, 0xb152,
+ "Error in processing mdiobus idle\n");
+ return -1;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int
+qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
+ uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
+{
+ int ret = 0;
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return -1;
+
+ qla8044_wr_reg_indirect(vha, addr3, value);
+ qla8044_wr_reg_indirect(vha, addr1, addr);
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return -1;
+
+ return 0;
+}
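
The helpers above share one shape: a bounded poll against a jiffies deadline, composed into the select/wait/read sequence used by the rdmdio minidump entry further down. The same bounded-poll shape in portable C, with clock_gettime() standing in for jiffies and a stub status word standing in for hardware:

#include <stdio.h>
#include <time.h>

static int hw_status;   /* pretend status register */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* bounded poll, mirroring qla8044_poll_wait_for_ready()'s
 * jiffies + msecs_to_jiffies(TIMEOUT_100_MS) loop */
static int poll_wait_ready(unsigned int mask, long timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	for (;;) {
		hw_status |= mask;   /* fake the hardware going ready */
		if (hw_status & mask)
			return 0;
		if (now_ms() >= deadline)
			return -1;
	}
}

int main(void)
{
	if (poll_wait_ready(0x1, 100) == 0)
		puts("bus ready, proceed with MDIO read");
	return 0;
}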
/*
* qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
* Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
@@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
lock_owner = qla8044_rd_reg(ha,
QLA8044_FLASH_LOCK_ID);
ql_log(ql_log_warn, vha, 0xb113,
- "%s: flash lock by %d failed, held by %d\n",
- __func__, ha->portnum, lock_owner);
+ "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+ __func__, ha->portnum, lock_owner);
ret_val = QLA_FUNCTION_FAILED;
break;
}
@@ -1541,7 +1633,7 @@ static void
qla8044_need_reset_handler(struct scsi_qla_host *vha)
{
uint32_t dev_state = 0, drv_state, drv_active;
- unsigned long reset_timeout, dev_init_timeout;
+ unsigned long reset_timeout;
struct qla_hw_data *ha = vha->hw;
ql_log(ql_log_fatal, vha, 0xb0c2,
@@ -1555,84 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
qla8044_idc_lock(ha);
}
+ dev_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX);
drv_state = qla8044_rd_direct(vha,
QLA8044_CRB_DRV_STATE_INDEX);
drv_active = qla8044_rd_direct(vha,
QLA8044_CRB_DRV_ACTIVE_INDEX);
ql_log(ql_log_info, vha, 0xb0c5,
- "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
- __func__, vha->host_no, drv_state, drv_active);
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
+ __func__, vha->host_no, drv_state, drv_active, dev_state);
- if (!ha->flags.nic_core_reset_owner) {
- ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
- "%s(%ld): reset acknowledged\n",
- __func__, vha->host_no);
- qla8044_set_rst_ready(vha);
+ qla8044_set_rst_ready(vha);
- /* Non-reset owners ACK Reset and wait for device INIT state
- * as part of Reset Recovery by Reset Owner
- */
- dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
- do {
- if (time_after_eq(jiffies, dev_init_timeout)) {
- ql_log(ql_log_info, vha, 0xb0c4,
- "%s: Non Reset owner: Reset Ack Timeout!\n",
- __func__);
- break;
- }
+ do {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql_log(ql_log_info, vha, 0xb0c4,
+ "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
+ __func__, ha->portnum, drv_state, drv_active);
+ break;
+ }
- qla8044_idc_unlock(ha);
- msleep(1000);
- qla8044_idc_lock(ha);
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
- dev_state = qla8044_rd_direct(vha,
- QLA8044_CRB_DEV_STATE_INDEX);
- } while (((drv_state & drv_active) != drv_active) &&
- (dev_state == QLA8XXX_DEV_NEED_RESET));
+ dev_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX);
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ } while (((drv_state & drv_active) != drv_active) &&
+ (dev_state == QLA8XXX_DEV_NEED_RESET));
+
+ /* Remove IDC participation of functions not acknowledging */
+ if (drv_state != drv_active) {
+ ql_log(ql_log_info, vha, 0xb0c7,
+ "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
+ __func__, vha->host_no, ha->portnum,
+ (drv_active ^ drv_state));
+ drv_active = drv_active & drv_state;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+ drv_active);
} else {
- qla8044_set_rst_ready(vha);
-
- /* wait for 10 seconds for reset ack from all functions */
- reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
-
- while ((drv_state & drv_active) != drv_active) {
- if (time_after_eq(jiffies, reset_timeout)) {
- ql_log(ql_log_info, vha, 0xb0c6,
- "%s: RESET TIMEOUT!"
- "drv_state: 0x%08x, drv_active: 0x%08x\n",
- QLA2XXX_DRIVER_NAME, drv_state, drv_active);
- break;
- }
-
- qla8044_idc_unlock(ha);
- msleep(1000);
- qla8044_idc_lock(ha);
-
- drv_state = qla8044_rd_direct(vha,
- QLA8044_CRB_DRV_STATE_INDEX);
- drv_active = qla8044_rd_direct(vha,
- QLA8044_CRB_DRV_ACTIVE_INDEX);
- }
-
- if (drv_state != drv_active) {
- ql_log(ql_log_info, vha, 0xb0c7,
- "%s(%ld): Reset_owner turning off drv_active "
- "of non-acking function 0x%x\n", __func__,
- vha->host_no, (drv_active ^ drv_state));
- drv_active = drv_active & drv_state;
- qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
- drv_active);
+ /*
+ * Reset owner should execute reset recovery,
+ * if all functions acknowledged
+ */
+ if ((ha->flags.nic_core_reset_owner) &&
+ (dev_state == QLA8XXX_DEV_NEED_RESET)) {
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_device_bootstrap(vha);
+ return;
}
+ }
- /*
- * Clear RESET OWNER, will be set at next reset
- * by next RST_OWNER
- */
+ /* Exit if non active function */
+ if (!(drv_active & (1 << ha->portnum))) {
ha->flags.nic_core_reset_owner = 0;
+ return;
+ }
- /* Start Reset Recovery */
+ /*
+ * Execute Reset Recovery if Reset Owner or Function 7
+ * is the only active function
+ */
+ if (ha->flags.nic_core_reset_owner ||
+ ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
+ ha->flags.nic_core_reset_owner = 0;
qla8044_device_bootstrap(vha);
}
}
@@ -1655,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha)
qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
}
+static int
+qla8044_check_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ if (drv_active & (1 << ha->portnum))
+ return QLA_SUCCESS;
+ else
+ return QLA_TEST_FAILED;
+}
+
static void
qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
{
@@ -1837,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
- ql_log(ql_log_warn, vha, 0xb0cf,
- "%s: Device Init Failed 0x%x = %s\n",
- QLA2XXX_DRIVER_NAME, dev_state,
- dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
-
- qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
- QLA8XXX_DEV_FAILED);
+ if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb0cf,
+ "%s: Device Init Failed 0x%x = %s\n",
+ QLA2XXX_DRIVER_NAME, dev_state,
+ dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+ qla8044_wr_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ }
}
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
@@ -2017,6 +2118,13 @@ qla8044_watchdog(struct scsi_qla_host *vha)
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ if (qla8044_check_fw_alive(vha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ ql_log(ql_log_warn, vha, 0xb10a,
+ "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
+ }
+
if (qla8044_check_temp(vha)) {
set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
ha->flags.isp82xx_fw_hung = 1;
@@ -2037,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha)
qla2xxx_wake_dpc(vha);
} else {
/* Check firmware health */
- if (qla8044_check_fw_alive(vha)) {
+ if (ha->flags.isp82xx_fw_hung) {
halt_status = qla8044_rd_direct(vha,
QLA8044_PEG_HALT_STATUS1_INDEX);
if (halt_status &
@@ -2073,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha)
__func__);
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
- qla82xx_clear_pending_mbx(vha);
}
}
- ha->flags.isp82xx_fw_hung = 1;
- ql_log(ql_log_warn, vha, 0xb10a,
- "Firmware hung.\n");
qla2xxx_wake_dpc(vha);
}
}
@@ -2286,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
}
if (j >= MAX_CTL_CHECK) {
- printk_ratelimited(KERN_ERR
- "%s: failed to read through agent\n", __func__);
write_unlock_irqrestore(&ha->hw_lock, flags);
return QLA_SUCCESS;
}
@@ -2882,6 +2984,231 @@ error_exit:
return rval;
}
+static uint32_t
+qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ int loop_cnt;
+ uint32_t addr1, addr2, value, data, temp, wrVal;
+ uint8_t stride, stride2;
+ uint16_t count;
+ uint32_t poll, mask, data_size, modify_mask;
+ uint32_t wait_count = 0;
+
+ uint32_t *data_ptr = *d_ptr;
+
+ struct qla8044_minidump_entry_rddfe *rddfe;
+ rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
+
+ addr1 = rddfe->addr_1;
+ value = rddfe->value;
+ stride = rddfe->stride;
+ stride2 = rddfe->stride2;
+ count = rddfe->count;
+
+ poll = rddfe->poll;
+ mask = rddfe->mask;
+ modify_mask = rddfe->modify_mask;
+ data_size = rddfe->data_size;
+
+ addr2 = addr1 + stride;
+
+ for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+ qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb153,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ } else {
+ qla8044_rd_reg_indirect(vha, addr2, &temp);
+ temp = temp & modify_mask;
+ temp = (temp | ((loop_cnt << 16) | loop_cnt));
+ wrVal = ((temp << 16) | temp);
+
+ qla8044_wr_reg_indirect(vha, addr2, wrVal);
+ qla8044_wr_reg_indirect(vha, addr1, value);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb154,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr1,
+ ((0x40000000 | value) + stride2));
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb155,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+
+ qla8044_rd_reg_indirect(vha, addr2, &data);
+
+ *data_ptr++ = wrVal;
+ *data_ptr++ = data;
+ }
+
+ }
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+
+error:
+ return -1;
+
+}
+
+static uint32_t
+qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ int ret = 0;
+ uint32_t addr1, addr2, value1, value2, data, selVal;
+ uint8_t stride1, stride2;
+ uint32_t addr3, addr4, addr5, addr6, addr7;
+ uint16_t count, loop_cnt;
+ uint32_t poll, mask;
+ uint32_t *data_ptr = *d_ptr;
+
+ struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+ rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
+
+ addr1 = rdmdio->addr_1;
+ addr2 = rdmdio->addr_2;
+ value1 = rdmdio->value_1;
+ stride1 = rdmdio->stride_1;
+ stride2 = rdmdio->stride_2;
+ count = rdmdio->count;
+
+ poll = rdmdio->poll;
+ mask = rdmdio->mask;
+ value2 = rdmdio->value_2;
+
+ addr3 = addr1 + stride1;
+
+ for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+ ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+ addr3, mask);
+ if (ret == -1)
+ goto error;
+
+ addr4 = addr2 - stride1;
+ ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
+ value2);
+ if (ret == -1)
+ goto error;
+
+ addr5 = addr2 - (2 * stride1);
+ ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
+ value1);
+ if (ret == -1)
+ goto error;
+
+ addr6 = addr2 - (3 * stride1);
+ ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
+ addr6, 0x2);
+ if (ret == -1)
+ goto error;
+
+ ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+ addr3, mask);
+ if (ret == -1)
+ goto error;
+
+ addr7 = addr2 - (4 * stride1);
+ data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
+ mask, addr7);
+ if (data == -1)
+ goto error;
+
+ selVal = (value2 << 18) | (value1 << 2) | 2;
+
+ stride2 = rdmdio->stride_2;
+ *data_ptr++ = selVal;
+ *data_ptr++ = data;
+
+ value1 = value1 + stride2;
+ *d_ptr = data_ptr;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+ uint32_t wait_count = 0;
+ struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+
+ pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+ addr1 = pollwr_hdr->addr_1;
+ addr2 = pollwr_hdr->addr_2;
+ value1 = pollwr_hdr->value_1;
+ value2 = pollwr_hdr->value_2;
+
+ poll = pollwr_hdr->poll;
+ mask = pollwr_hdr->mask;
+
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr2, value2);
+ qla8044_wr_reg_indirect(vha, addr1, value1);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+ wait_count++;
+ }
+
+ return QLA_SUCCESS;
+
+error:
+ return -1;
+}
+
/*
*
* qla8044_collect_md_data - Retrieve firmware minidump data.
@@ -3089,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
if (rval != QLA_SUCCESS)
qla8044_mark_entry_skipped(vha, entry_hdr, i);
break;
+ case QLA8044_RDDFE:
+ rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_RDMDIO:
+ rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_POLLWR:
+ rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
case QLA82XX_RDNOP:
default:
qla8044_mark_entry_skipped(vha, entry_hdr, i);
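
Each minidump template entry carries a type code in its header; the switch above dispatches on it, and an entry whose handler fails is marked skipped rather than aborting the whole dump. A minimal standalone version of that dispatch, with entry codes mirroring the new QLA8044_RDDFE/RDMDIO/POLLWR defines and stub handlers:

#include <stdio.h>

#define RDDFE  38
#define RDMDIO 39
#define POLLWR 40

struct entry_hdr { int type; };

static int process_rddfe(struct entry_hdr *e)  { (void)e; return 0; }
static int process_rdmdio(struct entry_hdr *e) { (void)e; return 0; }
static int process_pollwr(struct entry_hdr *e) { (void)e; return 0; }

static void mark_skipped(struct entry_hdr *e, int i)
{
	printf("entry %d (type %d): skipped\n", i, e->type);
}

int main(void)
{
	struct entry_hdr entries[] = { {RDDFE}, {RDMDIO}, {POLLWR}, {99} };
	int i;

	for (i = 0; i < 4; i++) {
		int rval;

		switch (entries[i].type) {
		case RDDFE:  rval = process_rddfe(&entries[i]);  break;
		case RDMDIO: rval = process_rdmdio(&entries[i]); break;
		case POLLWR: rval = process_pollwr(&entries[i]); break;
		default:     rval = -1; break;   /* unknown entry type */
		}
		if (rval != 0)
			mark_skipped(&entries[i], i);
	}
	return 0;
}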
@@ -3110,6 +3455,7 @@ skip_nxt_entry:
"Dump data mismatch: Data collected: "
"[0x%x], total_data_size:[0x%x]\n",
data_collected, ha->md_dump_size);
+ rval = QLA_FUNCTION_FAILED;
goto md_failed;
}
@@ -3134,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
if (!qla8044_collect_md_data(vha)) {
ha->fw_dumped = 1;
+ ha->prev_minidump_failed = 0;
} else {
ql_log(ql_log_fatal, vha, 0xb0db,
"%s: Unable to collect minidump\n",
__func__);
+ ha->prev_minidump_failed = 1;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 2ab2eabab908..ada36057d7cd 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -133,6 +133,7 @@
#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
#define QLA8044_LINK_SPEED_FACTOR 10
+#define QLA8044_FUN7_ACTIVE_INDEX 0x80
/* FLASH API Defines */
#define QLA8044_FLASH_MAX_WAIT_USEC 100
@@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd {
uint32_t rsvd_1;
} __packed;
+struct qla8044_minidump_entry_rddfe {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t value;
+ uint8_t stride;
+ uint8_t stride2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+ struct qla8044_minidump_entry_hdr h;
+
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint8_t stride_1;
+ uint8_t stride_2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t value_2;
+ uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
/* RDMUX2 Entry */
struct qla8044_minidump_entry_rdmux2 {
struct qla8044_minidump_entry_hdr h;
@@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = {
#define QLA8044_DBG_RSVD_ARRAY_LEN 8
#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
#define QLA8044_SS_PCI_INDEX 0
+#define QLA8044_RDDFE 38
+#define QLA8044_RDMDIO 39
+#define QLA8044_POLLWR 40
struct qla8044_minidump_template_hdr {
uint32_t entry_type;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index afc84814e9bb..d96bfb55e57b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
- qla2x00_clean_dsd_pool(ha, sp);
+ qla2x00_clean_dsd_pool(ha, sp, NULL);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
@@ -781,7 +781,7 @@ static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD 1000
-#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
+#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
unsigned long wait_iter = ABORT_WAIT_ITER;
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
@@ -844,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
}
/*
- * qla2x00_wait_for_reset_ready
- * Wait till the HBA is online after going through
- * <= MAX_RETRIES_OF_ISP_ABORT or
- * finally HBA is disabled ie marked offline or flash
- * operations are in progress.
+ * qla2x00_wait_for_hba_ready
+ * Wait till the HBA is ready before doing driver unload
*
* Input:
* ha - pointer to host adapter structure
@@ -857,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
* Does context switching-Release SPIN_LOCK
* (if any) before calling this routine.
*
- * Return:
- * Success (Adapter is online/no flash ops) : 0
- * Failed (Adapter is offline/disabled/flash ops in progress) : 1
*/
-static int
-qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
+static void
+qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
- int return_status;
- unsigned long wait_online;
struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
- test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
- ha->optrom_state != QLA_SWAITING ||
- ha->dpc_active) && time_before(jiffies, wait_online))
+ while ((!(vha->flags.online) || ha->dpc_active ||
+ ha->flags.mbox_busy))
msleep(1000);
-
- if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
- return_status = QLA_SUCCESS;
- else
- return_status = QLA_FUNCTION_FAILED;
-
- ql_dbg(ql_dbg_taskm, vha, 0x8019,
- "%s return status=%d.\n", __func__, return_status);
-
- return return_status;
}
int
@@ -945,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int ret;
unsigned int id, lun;
unsigned long flags;
- int wait = 0;
+ int rval, wait = 0;
struct qla_hw_data *ha = vha->hw;
if (!CMD_SP(cmd))
@@ -974,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
sp_get(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ret = FAILED;
+ rval = ha->isp_ops->abort_command(sp);
+ if (rval) {
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
+ /*
+ * Decrement the ref_count since we can't find the
+ * command
+ */
+ atomic_dec(&sp->ref_count);
+ ret = SUCCESS;
+ } else
+ ret = FAILED;
+
ql_dbg(ql_dbg_taskm, vha, 0x8003,
- "Abort command mbx failed cmd=%p.\n", cmd);
+ "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
} else {
ql_dbg(ql_dbg_taskm, vha, 0x8004,
"Abort command mbx success cmd=%p.\n", cmd);
@@ -985,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
+ /*
+ * Clear the slot in the outstanding_cmds array if we can't find the
+ * command to reclaim the resources.
+ */
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+ vha->req->outstanding_cmds[sp->handle] = NULL;
sp->done(ha, sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1236,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
ql_log(ql_log_info, vha, 0x8018,
"ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
- if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
+ /*
+ * No point in issuing another reset if one is active. Also do not
+ * attempt a reset if we are updating flash.
+ */
+ if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
goto eh_host_reset_lock;
if (vha != base_vha) {
@@ -2270,6 +2267,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2271:
+ ha->device_type |= DT_ISP2271;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
@@ -2346,7 +2350,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2877,6 +2882,7 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
+ ha->prev_minidump_failed = 0;
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
@@ -3136,6 +3142,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
+ qla2x00_wait_for_hba_ready(base_vha);
+
set_bit(UNLOADING, &base_vha->dpc_flags);
if (IS_QLAFX00(ha))
@@ -3645,6 +3653,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->eft = NULL;
ha->eft_dma = 0;
ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
ha->fw_dump_reading = 0;
ha->fw_dump = NULL;
ha->fw_dump_len = 0;
@@ -4913,12 +4922,13 @@ qla2x00_do_dpc(void *data)
if (qlafx00_reset_initialize(base_vha)) {
/* Failed. Abort isp later. */
if (!test_bit(UNLOADING,
- &base_vha->dpc_flags))
+ &base_vha->dpc_flags)) {
set_bit(ISP_UNRECOVERABLE,
&base_vha->dpc_flags);
ql_dbg(ql_dbg_dpc, base_vha,
0x4021,
"Reset Recovery Failed\n");
+ }
}
}
@@ -5077,8 +5087,10 @@ intr_on_check:
ha->isp_ops->enable_intrs(ha);
if (test_and_clear_bit(BEACON_BLINK_NEEDED,
- &base_vha->dpc_flags))
- ha->isp_ops->beacon_blink(base_vha);
+ &base_vha->dpc_flags)) {
+ if (ha->beacon_blink_led == 1)
+ ha->isp_ops->beacon_blink(base_vha);
+ }
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -5325,7 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP82XX 7
#define FW_ISP2031 8
#define FW_ISP8031 9
-#define FW_ISP2071 10
+#define FW_ISP27XX 10
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -5337,7 +5349,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP82XX "ql8200_fw.bin"
#define FW_FILE_ISP2031 "ql2600_fw.bin"
#define FW_FILE_ISP8031 "ql8300_fw.bin"
-#define FW_FILE_ISP2071 "ql2700_fw.bin"
+#define FW_FILE_ISP27XX "ql2700_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -5353,7 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP82XX, },
{ .name = FW_FILE_ISP2031, },
{ .name = FW_FILE_ISP8031, },
- { .name = FW_FILE_ISP2071, },
+ { .name = FW_FILE_ISP27XX, },
};
struct fw_blob *
@@ -5382,8 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP2031];
} else if (IS_QLA8031(ha)) {
blob = &qla_fw_blobs[FW_ISP8031];
- } else if (IS_QLA2071(ha)) {
- blob = &qla_fw_blobs[FW_ISP2071];
+ } else if (IS_QLA27XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP27XX];
} else {
return NULL;
}
@@ -5714,6 +5726,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 46ef0ac48f44..2fb7ebfbbc38 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f28123e8ed65..bca173e56f16 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1727,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
if (IS_QLA2031(ha)) {
led_select_value = qla83xx_select_led_port(ha);
- qla83xx_wr_reg(vha, led_select_value, 0x40002000);
- qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
- msleep(1000);
- qla83xx_wr_reg(vha, led_select_value, 0x40004000);
- qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
+ qla83xx_wr_reg(vha, led_select_value, 0x40000230);
+ qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
} else if (IS_QLA8031(ha)) {
led_select_value = qla83xx_select_led_port(ha);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0cb73074c199..e632e14180cf 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
/*
* Global Variables
*/
-static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
@@ -182,6 +181,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
+ ql_dbg(ql_dbg_tgt, vha, 0xe072,
+ "%s: qla_target(%d): type %x ox_id %04x\n",
+ __func__, vha->vp_idx, atio->u.raw.entry_type,
+ be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
{
@@ -236,6 +240,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
switch (pkt->entry_type) {
+ case CTIO_CRC2:
+ ql_dbg(ql_dbg_tgt, vha, 0xe073,
+ "qla_target(%d):%s: CRC2 Response pkt\n",
+ vha->vp_idx, __func__);
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -1120,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ctio->u.status1.flags =
__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
CTIO7_FLAGS_TERMINATE);
- ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+ ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
qla2x00_start_iocbs(vha, vha->req);
@@ -1254,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
{
struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
struct ctio7_to_24xx *ctio;
+ uint16_t temp;
ql_dbg(ql_dbg_tgt, ha, 0xe008,
"Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
@@ -1284,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
CTIO7_FLAGS_SEND_STATUS);
- ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
__constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
ctio->u.status1.response_len = __constant_cpu_to_le16(8);
@@ -1350,13 +1360,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
prm->cmd->sg_mapped = 1;
- /*
- * If greater than four sg entries then we need to allocate
- * the continuation entries
- */
- if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
- prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
- prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+ if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
+ /*
+ * If greater than four sg entries then we need to allocate
+ * the continuation entries
+ */
+ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+ prm->tgt->datasegs_per_cmd,
+ prm->tgt->datasegs_per_cont);
+ } else {
+ /* DIF */
+ if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+ (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+ prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
+ prm->tot_dsds = prm->seg_cnt;
+ } else
+ prm->tot_dsds = prm->seg_cnt;
+
+ if (cmd->prot_sg_cnt) {
+ prm->prot_sg = cmd->prot_sg;
+ prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
+ cmd->prot_sg, cmd->prot_sg_cnt,
+ cmd->dma_data_direction);
+ if (unlikely(prm->prot_seg_cnt == 0))
+ goto out_err;
+
+ if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+ (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+ /* DIF bundling not supported here */
+ prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
+ cmd->blk_sz);
+ prm->tot_dsds += prm->prot_seg_cnt;
+ } else
+ prm->tot_dsds += prm->prot_seg_cnt;
+ }
+ }
ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
prm->seg_cnt, prm->req_cnt);
@@ -1377,6 +1416,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
BUG_ON(!cmd->sg_mapped);
pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
cmd->sg_mapped = 0;
+
+ if (cmd->prot_sg_cnt)
+ pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ cmd->dma_data_direction);
+
+ if (cmd->ctx_dsd_alloced)
+ qla2x00_clean_dsd_pool(ha, NULL, cmd);
+
+ if (cmd->ctx)
+ dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
@@ -1466,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
struct ctio7_to_24xx *pkt;
struct qla_hw_data *ha = vha->hw;
struct atio_from_isp *atio = &prm->cmd->atio;
+ uint16_t temp;
pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
prm->pkt = pkt;
@@ -1494,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
pkt->exchange_addr = atio->u.isp24.exchange_addr;
pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
- pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
ql_dbg(ql_dbg_tgt, vha, 0xe00c,
"qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
- vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
- le16_to_cpu(pkt->u.status0.ox_id));
+ vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
return 0;
}
@@ -1665,8 +1715,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
- vha->vp_idx, cmd->tag);
+ ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
+ vha->vp_idx, cmd->tag,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
prm->cmd = cmd;
prm->tgt = tgt;
@@ -1902,6 +1953,328 @@ skip_explict_conf:
/* Sense with len > 24, is it possible ??? */
}
+
+
+/* DIF */
+static inline int
+qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
+{
+ /*
+ * Uncomment when corresponding SCSI changes are done.
+ *
+ if (!sp->cmd->prot_chk)
+ return 0;
+ *
+ */
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ if (ql2xenablehba_err_chk >= 1)
+ return 1;
+ break;
+ case TARGET_PROT_DOUT_PASS:
+ case TARGET_PROT_DIN_PASS:
+ if (ql2xenablehba_err_chk >= 2)
+ return 1;
+ break;
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_STRIP:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
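
Read as a policy table, the switch above gates HBA-side protection checking per operation class (a summary of the cases shown, not an official matrix):

/*
 * prot_op                      checking enabled when
 * ---------------------------  ------------------------------------
 * DOUT_INSERT / DIN_STRIP      ql2xenablehba_err_chk >= 1
 * DOUT_PASS   / DIN_PASS       ql2xenablehba_err_chk >= 2
 * DIN_INSERT  / DOUT_STRIP     always (HBA generates/strips the PI)
 * anything else                never
 */
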
+
+/*
+ * qlt_set_t10dif_tags - Extract the ref and app tags from the SCSI command
+ */
+static inline void
+qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+{
+ uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+
+ /* Wait until the Mode Sense/Select commands (mode page 0Ah,
+ * subpage 2) have been implemented by TCM before the app tag
+ * is available. Look for modesense_handlers[].
+ */
+ ctx->app_tag = 0;
+ ctx->app_tag_mask[0] = 0x0;
+ ctx->app_tag_mask[1] = 0x0;
+
+ switch (se_cmd->prot_type) {
+ case TARGET_DIF_TYPE0_PROT:
+ /*
+ * No check for ql2xenablehba_err_chk, as it would be an
+ * I/O error if hba tag generation is not done.
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
+ /*
+ * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+ * 16 bit app tag.
+ */
+ case TARGET_DIF_TYPE1_PROT:
+ ctx->ref_tag = cpu_to_le32(lba);
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
+ * match LBA in CDB + N
+ */
+ case TARGET_DIF_TYPE2_PROT:
+ ctx->ref_tag = cpu_to_le32(lba);
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
+
+ /* For Type 3 protection: 16 bit GUARD only */
+ case TARGET_DIF_TYPE3_PROT:
+ ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+ ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+ break;
+ }
+}
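
The ref/app tag masks above select bytes within the standard 8-byte T10 PI tuple that accompanies each logical block; for reference (struct name hypothetical, layout per SBC-3):

#include <linux/types.h>

/* 8 bytes of protection information per logical block, big-endian. */
struct t10_pi_tuple_sketch {
	__be16 guard_tag;	/* CRC-16 over the data block */
	__be16 app_tag;		/* 0xffff means "ignore this block" */
	__be32 ref_tag;		/* low 32 bits of the LBA (type 1/2) */
};
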
+
+
+static inline int
+qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
+{
+ uint32_t *cur_dsd;
+ int sgc;
+ uint32_t transfer_length = 0;
+ uint32_t data_bytes;
+ uint32_t dif_bytes;
+ uint8_t bundling = 1;
+ uint8_t *clr_ptr;
+ struct crc_context *crc_ctx_pkt = NULL;
+ struct qla_hw_data *ha;
+ struct ctio_crc2_to_fw *pkt;
+ dma_addr_t crc_ctx_dma;
+ uint16_t fw_prot_opts = 0;
+ struct qla_tgt_cmd *cmd = prm->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ uint32_t h;
+ struct atio_from_isp *atio = &prm->cmd->atio;
+ uint16_t t16;
+
+ sgc = 0;
+ ha = vha->hw;
+
+ pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe071,
+ "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
+ vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+ prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
+
+ if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
+ (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
+ bundling = 0;
+
+ /* Compute dif len and adjust data len to include protection */
+ data_bytes = cmd->bufflen;
+ dif_bytes = (data_bytes / cmd->blk_sz) * 8;
+
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_STRIP:
+ transfer_length = data_bytes;
+ data_bytes += dif_bytes;
+ break;
+
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ transfer_length = data_bytes + dif_bytes;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+ /* HBA error checking enabled */
+ else if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+ (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+ fw_prot_opts |= PO_DIS_VALD_APP_ESC;
+ else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+ fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+ }
+
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_INSERT:
+ fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_STRIP:
+ fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ fw_prot_opts |= PO_MODE_DIF_PASS;
+ /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
+ break;
+ default:/* Normal Request */
+ fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ }
+
+
+ /* ---- PKT ---- */
+ /* Update entry type to indicate Command Type CRC_2 IOCB */
+ pkt->entry_type = CTIO_CRC2;
+ pkt->entry_count = 1;
+ pkt->vp_index = vha->vp_idx;
+
+ h = qlt_make_handle(vha);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and, therefore, the command.
+ */
+ return -EAGAIN;
+ } else
+ ha->tgt.cmds[h-1] = prm->cmd;
+
+
+ pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = prm->cmd->loop_id;
+ pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->exchange_addr = atio->u.isp24.exchange_addr;
+
+ /* use a temp to silence a compiler type warning */
+ t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->ox_id = cpu_to_le16(t16);
+
+ t16 = (atio->u.isp24.attr << 9);
+ pkt->flags |= cpu_to_le16(t16);
+ pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
+
+ /* Set transfer direction */
+ if (cmd->dma_data_direction == DMA_TO_DEVICE)
+ pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+ else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+ pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+
+
+ pkt->dseg_count = prm->tot_dsds;
+ /* Fibre channel byte count */
+ pkt->transfer_length = cpu_to_le32(transfer_length);
+
+
+ /* ----- CRC context -------- */
+
+ /* Allocate CRC context from global pool */
+ crc_ctx_pkt = cmd->ctx =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+
+ if (!crc_ctx_pkt)
+ goto crc_queuing_error;
+
+ /* Zero out CTX area. */
+ clr_ptr = (uint8_t *)crc_ctx_pkt;
+ memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+ crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+ INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+ /* Set handle */
+ crc_ctx_pkt->handle = pkt->handle;
+
+ qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+
+ pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+ pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+
+ if (!bundling) {
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ } else {
+ /*
+ * Configure bundling if we need to fetch interleaving
+ * protection PCI accesses
+ */
+ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+ crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+ crc_ctx_pkt->u.bundling.dseg_count =
+ cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ }
+
+ /* Finish the common fields of CRC pkt */
+ crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
+ crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+ crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+ crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+
+
+ /* Walk the data segments */
+ pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+
+ if (!bundling && prm->prot_seg_cnt) {
+ if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
+ prm->tot_dsds, cmd))
+ goto crc_queuing_error;
+ } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
+ (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+ goto crc_queuing_error;
+
+ if (bundling && prm->prot_seg_cnt) {
+ /* Walk the DIF segments */
+ pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
+
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
+ prm->prot_seg_cnt, cmd))
+ goto crc_queuing_error;
+ }
+ return QLA_SUCCESS;
+
+crc_queuing_error:
+ /* Cleanup will be performed by the caller */
+
+ return QLA_FUNCTION_FAILED;
+}
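
The length bookkeeping above follows from 8 bytes of PI per block. A worked example with assumed sizes (bufflen = 4096, blk_sz = 512):

/*
 * dif_bytes = (4096 / 512) * 8 = 64
 *
 * DIN_INSERT / DOUT_STRIP (PI exists only on the DMA side):
 *	transfer_length (FC wire)          = 4096
 *	data_bytes -> crc_ctx byte_count   = 4096 + 64 = 4160
 *
 * STRIP / INSERT toward the wire, and PASS (PI on the wire):
 *	transfer_length (FC wire)          = 4096 + 64 = 4160
 *	data_bytes -> crc_ctx byte_count   = 4096
 */
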
+
+
/*
* Callback to set up a response for xmit_type QLA_TGT_XMIT_DATA and
* QLA_TGT_XMIT_STATUS on >= 24xx silicon
@@ -1921,9 +2294,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
qlt_check_srr_debug(cmd, &xmit_type);
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
- "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
- "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
- 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
+ (xmit_type & QLA_TGT_XMIT_STATUS) ?
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
+ &cmd->se_cmd);
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
@@ -1941,7 +2315,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (unlikely(res))
goto out_unmap_unlock;
- res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
+ res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ else
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
if (unlikely(res != 0))
goto out_unmap_unlock;
@@ -1953,7 +2330,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
__constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
CTIO7_FLAGS_STATUS_MODE_0);
- qlt_load_data_segments(&prm, vha);
+ if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+ qlt_load_data_segments(&prm, vha);
if (prm.add_status_pkt == 0) {
if (xmit_type & QLA_TGT_XMIT_STATUS) {
@@ -1983,8 +2361,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
ql_dbg(ql_dbg_tgt, vha, 0xe019,
"Building additional status packet\n");
+ /*
+ * T10 DIF: ctio_crc2_to_fw overlays on top of
+ * ctio7_to_24xx
+ */
memcpy(ctio, pkt, sizeof(*ctio));
+ /* reset back to CTIO7 */
ctio->entry_count = 1;
+ ctio->entry_type = CTIO_TYPE7;
ctio->dseg_count = 0;
ctio->u.status1.flags &= ~__constant_cpu_to_le16(
CTIO7_FLAGS_DATA_IN);
@@ -1993,6 +2377,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
pkt->u.status0.flags |= __constant_cpu_to_le16(
CTIO7_FLAGS_DONT_RET_CTIO);
+
+ /* qlt_24xx_init_ctio_to_isp() will correct
+ * all necessary fields that are part of the CTIO7.
+ * There should be no residual CTIO-CRC2 data.
+ */
qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
&prm);
pr_debug("Status CTIO7: %p\n", ctio);
@@ -2041,8 +2430,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
return -EIO;
- ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
- (int)vha->vp_idx);
+ ql_dbg(ql_dbg_tgt, vha, 0xe01b,
+ "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
+ __func__, (int)vha->vp_idx, &cmd->se_cmd,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
@@ -2054,14 +2445,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
res = qlt_check_reserve_free_req(vha, prm.req_cnt);
if (res != 0)
goto out_unlock_free_unmap;
+ if (cmd->se_cmd.prot_op)
+ res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ else
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
- res = qlt_24xx_build_ctio_pkt(&prm, vha);
if (unlikely(res != 0))
goto out_unlock_free_unmap;
pkt = (struct ctio7_to_24xx *)prm.pkt;
pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
CTIO7_FLAGS_STATUS_MODE_0);
- qlt_load_data_segments(&prm, vha);
+
+ if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+ qlt_load_data_segments(&prm, vha);
cmd->state = QLA_TGT_STATE_NEED_DATA;
@@ -2079,6 +2475,143 @@ out_unlock_free_unmap:
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA.
+ */
+static inline int
+qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
+ struct ctio_crc_from_fw *sts)
+{
+ uint8_t *ap = &sts->actual_dif[0];
+ uint8_t *ep = &sts->expected_dif[0];
+ uint32_t e_ref_tag, a_ref_tag;
+ uint16_t e_app_tag, a_app_tag;
+ uint16_t e_guard, a_guard;
+ uint64_t lba = cmd->se_cmd.t_task_lba;
+
+ a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+ a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+ a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+
+ e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+ e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+ e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe075,
+ "iocb(s) %p Returned STATUS.\n", sts);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xf075,
+ "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
+
+ /*
+ * Ignore sector if:
+ * For type 3: ref & app tags are all 'f's
+ * For type 0,1,2: app tag is all 'f's
+ */
+ if ((a_app_tag == 0xffff) &&
+ ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
+ (a_ref_tag == 0xffffffff))) {
+ uint32_t blocks_done;
+
+ /* 2TB boundary case covered automatically with this */
+ blocks_done = e_ref_tag - (uint32_t)lba + 1;
+ cmd->se_cmd.bad_sector = e_ref_tag;
+ cmd->se_cmd.pi_err = 0;
+ ql_dbg(ql_dbg_tgt, vha, 0xf074,
+ "need to return scsi good\n");
+
+ /* Update protection tag */
+ if (cmd->prot_sg_cnt) {
+ uint32_t i, j = 0, k = 0, num_ent;
+ struct scatterlist *sg, *sgl;
+
+
+ sgl = cmd->prot_sg;
+
+ /* Patch the corresponding protection tags */
+ for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
+ num_ent = sg_dma_len(sg) / 8;
+ if (k + num_ent < blocks_done) {
+ k += num_ent;
+ continue;
+ }
+ j = blocks_done - k - 1;
+ k = blocks_done;
+ break;
+ }
+
+ if (k != blocks_done) {
+ ql_log(ql_log_warn, vha, 0xf076,
+ "unexpected tag values tag:lba=%u:%llu)\n",
+ e_ref_tag, (unsigned long long)lba);
+ goto out;
+ }
+
+#if 0
+ struct sd_dif_tuple *spt;
+ /* TODO:
+ * This section came from the initiator code. Is it valid here?
+ * Should the ULP be overridden with the actual value?
+ */
+ spt = page_address(sg_page(sg)) + sg->offset;
+ spt += j;
+
+ spt->app_tag = 0xffff;
+ if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
+ spt->ref_tag = 0xffffffff;
+#endif
+ }
+
+ return 0;
+ }
+
+ /* check guard */
+ if (e_guard != a_guard) {
+ cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+ ql_log(ql_log_warn, vha, 0xe076,
+ "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+ a_guard, e_guard, cmd);
+ goto out;
+ }
+
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
+ cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ cmd->se_cmd.bad_sector = e_ref_tag;
+
+ ql_log(ql_log_warn, vha, 0xe077,
+ "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+ a_guard, e_guard, cmd);
+ goto out;
+ }
+
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
+ cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+ ql_log(ql_log_warn, vha, 0xe078,
+ "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+ a_guard, e_guard, cmd);
+ goto out;
+ }
+out:
+ return 1;
+}
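
A quick worked example of the "all f's" escape path above, under assumed values:

/*
 * Assume t_task_lba = 0x1000 and the firmware reports an expected
 * ref tag of 0x1003 before bailing out:
 *
 *	blocks_done = 0x1003 - 0x1000 + 1 = 4
 *
 * i.e. four protection intervals were verified. The scatterlist walk
 * then finds the prot_sg element covering interval #4 (8 bytes of PI
 * per interval) and the index within it, j = blocks_done - k - 1.
 */
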
+
+
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@ -2089,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
int ret = 0;
+ uint16_t temp;
ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
@@ -2125,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
CTIO7_FLAGS_TERMINATE);
- ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.ox_id = cpu_to_le16(temp);
/* Most likely, it isn't needed */
ctio24->u.status1.residual = get_unaligned((uint32_t *)
@@ -2155,21 +2690,46 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
rc = __qlt_send_term_exchange(vha, cmd, atio);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
- if (rc == 1) {
+ /*
+ * Terminate exchange will tell the fw to release any active CTIO
+ * that's in FW possession and clean up the exchange.
+ *
+ * "cmd->state == QLA_TGT_STATE_ABORTED" means the CTIO is still
+ * down at the FW. Free the cmd later, when the CTIO comes back
+ * with aborted (0x2) status.
+ *
+ * "cmd->state != QLA_TGT_STATE_ABORTED" means the CTIO is already
+ * back with some error. Free the cmd now.
+ */
+ if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
if (!ha_locked && !in_interrupt())
msleep(250); /* just in case */
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
}
+ return;
}
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
- BUG_ON(cmd->sg_mapped);
+ struct qla_tgt_sess *sess = cmd->sess;
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
+ "%s: se_cmd[%p] ox_id %04x\n",
+ __func__, &cmd->se_cmd,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+
+ BUG_ON(cmd->sg_mapped);
if (unlikely(cmd->free_sg))
kfree(cmd->sg);
- kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+
+ if (!sess || !sess->se_sess) {
+ WARN_ON(1);
+ return;
+ }
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -2374,6 +2934,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
+ /* the driver requested an abort via Terminate Exchange */
case CTIO_TIMEOUT:
case CTIO_INVALID_RX_ID:
/* They are OK */
@@ -2404,18 +2965,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
else
return;
+ case CTIO_DIF_ERROR: {
+ struct ctio_crc_from_fw *crc =
+ (struct ctio_crc_from_fw *)ctio;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
+ "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+ vha->vp_idx, status, cmd->state, se_cmd,
+ *((u64 *)&crc->actual_dif[0]),
+ *((u64 *)&crc->expected_dif[0]));
+
+ if (qlt_handle_dif_error(vha, cmd, ctio)) {
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ /* SCSI write/xfer-rdy completed */
+ goto skip_term;
+ } else {
+ /* SCSI read/xmit-response completed:
+ * call the DIF handler to send the SCSI status
+ * rather than terminating the exchange.
+ */
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ ha->tgt.tgt_ops->handle_dif_err(cmd);
+ return;
+ }
+ } else {
+ /* Need to generate a SCSI good completion,
+ * because the FW did not send a SCSI status.
+ */
+ status = 0;
+ goto skip_term;
+ }
+ break;
+ }
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
- "qla_target(%d): CTIO with error status "
- "0x%x received (state %x, se_cmd %p\n",
+ "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
vha->vp_idx, status, cmd->state, se_cmd);
break;
}
- if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+
+ /* "cmd->state == QLA_TGT_STATE_ABORTED" means
+ * cmd is already aborted/terminated, we don't
+ * need to terminate again. The exchange is already
+ * cleaned up/freed at FW level. Just cleanup at driver
+ * level.
+ */
+ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+ (cmd->state != QLA_TGT_STATE_ABORTED)) {
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
+ }
}
+skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
@@ -2444,7 +3045,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
}
- if (unlikely(status != CTIO_SUCCESS)) {
+ if (unlikely(status != CTIO_SUCCESS) &&
+ (cmd->state != QLA_TGT_STATE_ABORTED)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
dump_stack();
}
@@ -2489,13 +3091,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
/*
* Process context for I/O path into tcm_qla2xxx code
*/
-static void qlt_do_work(struct work_struct *work)
+static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
- struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess = NULL;
+ struct qla_tgt_sess *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
unsigned long flags;
@@ -2505,41 +3106,6 @@ static void qlt_do_work(struct work_struct *work)
if (tgt->tgt_stop)
goto out_term;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
- atio->u.isp24.fcp_hdr.s_id);
- /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
- if (sess)
- kref_get(&sess->se_sess->sess_kref);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- if (unlikely(!sess)) {
- uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
- if (atio->u.raw.entry_count > 1) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry cmd %p\n", cmd);
- goto out_term;
- }
-
- mutex_lock(&vha->vha_tgt.tgt_mutex);
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has an extra creation ref. */
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
-
- if (!sess)
- goto out_term;
- }
-
- cmd->sess = sess;
- cmd->loop_id = sess->loop_id;
- cmd->conf_compl_supported = sess->conf_compl_supported;
-
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -2563,11 +3129,12 @@ static void qlt_do_work(struct work_struct *work)
atio->u.isp24.fcp_cmnd.add_cdb_len]));
ql_dbg(ql_dbg_tgt, vha, 0xe022,
- "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
- cmd, cmd->unpacked_lun, cmd->tag);
+ "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
+ cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
- ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
- fcp_task_attr, data_dir, bidi);
+ ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ fcp_task_attr, data_dir, bidi);
if (ret != 0)
goto out_term;
/*
@@ -2586,17 +3153,114 @@ out_term:
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
- kmem_cache_free(qla_tgt_cmd_cachep, cmd);
- if (sess)
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_do_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ __qlt_do_work(cmd);
+}
+
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+ struct qla_tgt_sess *sess,
+ struct atio_from_isp *atio)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0)
+ return NULL;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+ cmd->state = QLA_TGT_STATE_NEW;
+ cmd->tgt = vha->vha_tgt.qla_tgt;
+ cmd->vha = vha;
+ cmd->se_cmd.map_tag = tag;
+ cmd->sess = sess;
+ cmd->loop_id = sess->loop_id;
+ cmd->conf_compl_supported = sess->conf_compl_supported;
+
+ return cmd;
+}
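
qlt_get_tag() assumes the session was created with a preallocated command map (see the transport_init_session_tags() change in tcm_qla2xxx.c further down). A minimal sketch of the alloc/free pairing, using the v3.16 percpu_ida API:

#include <linux/percpu_ida.h>

/* With TASK_RUNNING the allocation never blocks; it returns a
 * negative value when the per-session pool is exhausted. */
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
	return NULL;
cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];

/* ... and the matching release, as done in qlt_free_cmd(): */
percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
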
+
+static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+ uint16_t);
+
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+ struct qla_tgt_sess_op *op = container_of(work,
+ struct qla_tgt_sess_op, work);
+ scsi_qla_host_t *vha = op->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
+ uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+ if (op->atio.u.raw.entry_count > 1) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+ "Dropping multy entry atio %p\n", &op->atio);
+ goto out_term;
+ }
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has an extra creation ref. */
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ if (!sess)
+ goto out_term;
+ /*
+ * Now obtain a pre-allocated session tag using the original op->atio
+ * packet header, and dispatch into __qlt_do_work() using the existing
+ * process context.
+ */
+ cmd = qlt_get_tag(vha, sess, &op->atio);
+ if (!cmd) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ kfree(op);
+ return;
+ }
+ /*
+ * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+ * the extra reference taken above by qlt_make_local_sess()
+ */
+ __qlt_do_work(cmd);
+ kfree(op);
+ return;
+
+out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha, NULL, &op->atio, 1);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ kfree(op);
+
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
+ struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
struct qla_tgt_cmd *cmd;
if (unlikely(tgt->tgt_stop)) {
@@ -2605,18 +3269,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -EFAULT;
}
- cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess)) {
+ struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+ GFP_ATOMIC);
+ if (!op)
+ return -ENOMEM;
+
+ memcpy(&op->atio, atio, sizeof(*atio));
+ INIT_WORK(&op->work, qlt_create_sess_from_atio);
+ queue_work(qla_tgt_wq, &op->work);
+ return 0;
+ }
+ /*
+ * Do kref_get() before returning and dropping qla_hw_data->hardware_lock.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+
+ cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+ ha->tgt.tgt_ops->put_sess(sess);
return -ENOMEM;
}
- memcpy(&cmd->atio, atio, sizeof(*atio));
- cmd->state = QLA_TGT_STATE_NEW;
- cmd->tgt = vha->vha_tgt.qla_tgt;
- cmd->vha = vha;
-
INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work);
return 0;
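
Taken together, this hunk moves session creation out of the ATIO fast path; the resulting flow, as read from the code above:

/*
 * ATIO arrives (hardware_lock held):
 *   session found   -> kref_get() + qlt_get_tag() + queue qlt_do_work()
 *   session missing -> kzalloc(struct qla_tgt_sess_op) + queue
 *                      qlt_create_sess_from_atio(), which builds the
 *                      session in process context, grabs a tag, and
 *                      calls __qlt_do_work() directly.
 */
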
@@ -3527,11 +4204,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
ql_dbg(ql_dbg_tgt, vha, 0xe02d,
- "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
- "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+ "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
atio->u.isp24.fcp_cmnd.rddata,
atio->u.isp24.fcp_cmnd.wrdata,
+ atio->u.isp24.fcp_cmnd.cdb[0],
atio->u.isp24.fcp_cmnd.add_cdb_len,
be32_to_cpu(get_unaligned((uint32_t *)
&atio->u.isp24.fcp_cmnd.add_cdb[
@@ -3629,11 +4306,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
tgt->irq_cmd_count++;
switch (pkt->entry_type) {
+ case CTIO_CRC2:
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
- vha->vp_idx);
+ ql_dbg(ql_dbg_tgt, vha, 0xe030,
+ "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
+ entry->entry_type, vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4768,6 +5447,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
+ case CTIO_CRC2:
return 1;
default:
return 0;
@@ -4911,23 +5591,13 @@ int __init qlt_init(void)
if (!QLA_TGT_MODE_ENABLED())
return 0;
- qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
- sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
- NULL);
- if (!qla_tgt_cmd_cachep) {
- ql_log(ql_log_fatal, NULL, 0xe06c,
- "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
- return -ENOMEM;
- }
-
qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
qla_tgt_mgmt_cmd), 0, NULL);
if (!qla_tgt_mgmt_cmd_cachep) {
ql_log(ql_log_fatal, NULL, 0xe06d,
"kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
@@ -4955,8 +5625,6 @@ out_cmd_mempool:
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-out:
- kmem_cache_destroy(qla_tgt_cmd_cachep);
return ret;
}
@@ -4968,5 +5636,4 @@ void qlt_exit(void)
destroy_workqueue(qla_tgt_wq);
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
- kmem_cache_destroy(qla_tgt_cmd_cachep);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index ce33d8c26406..d1d24fb0160a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -293,6 +293,7 @@ struct ctio_to_2xxx {
#define CTIO_ABORTED 0x02
#define CTIO_INVALID_RX_ID 0x08
#define CTIO_TIMEOUT 0x0B
+#define CTIO_DIF_ERROR 0x0C /* DIF error detected */
#define CTIO_LIP_RESET 0x0E
#define CTIO_TARGET_RESET 0x17
#define CTIO_PORT_UNAVAILABLE 0x28
@@ -315,7 +316,7 @@ struct fcp_hdr {
uint8_t seq_id;
uint8_t df_ctl;
uint16_t seq_cnt;
- uint16_t ox_id;
+ __be16 ox_id;
uint16_t rx_id;
uint32_t parameter;
} __packed;
@@ -440,9 +441,9 @@ struct ctio7_to_24xx {
union {
struct {
uint16_t reserved1;
- uint16_t flags;
+ __le16 flags;
uint32_t residual;
- uint16_t ox_id;
+ __le16 ox_id;
uint16_t scsi_status;
uint32_t relative_offset;
uint32_t reserved2;
@@ -457,7 +458,7 @@ struct ctio7_to_24xx {
uint16_t sense_length;
uint16_t flags;
uint32_t residual;
- uint16_t ox_id;
+ __le16 ox_id;
uint16_t scsi_status;
uint16_t response_len;
uint16_t reserved;
@@ -498,11 +499,12 @@ struct ctio7_from_24xx {
#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
#define CTIO7_FLAGS_STATUS_MODE_0 0
#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
+#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7
#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
#define CTIO7_FLAGS_DSD_PTR BIT_2
-#define CTIO7_FLAGS_DATA_IN BIT_1
-#define CTIO7_FLAGS_DATA_OUT BIT_0
+#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */
+#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */
#define ELS_PLOGI 0x3
#define ELS_FLOGI 0x4
@@ -514,6 +516,68 @@ struct ctio7_from_24xx {
#define ELS_ADISC 0x52
/*
+ *CTIO Type CRC_2 IOCB
+ */
+struct ctio_crc2_to_fw {
+ uint8_t entry_type; /* Entry type. */
+#define CTIO_CRC2 0x7A
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t add_flags; /* additional flags */
+#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
+
+ uint8_t initiator_id[3]; /* initiator ID */
+ uint8_t reserved1;
+ uint32_t exchange_addr; /* rcv exchange address */
+ uint16_t reserved2;
+ __le16 flags; /* refer to CTIO7 flags values */
+ uint32_t residual;
+ __le16 ox_id;
+ uint16_t scsi_status;
+ __le32 relative_offset;
+ uint32_t reserved5;
+ __le32 transfer_length; /* total fc transfer length */
+ uint32_t reserved6;
+ __le32 crc_context_address[2];/* Data segment address. */
+ uint16_t crc_context_len; /* Data segment length. */
+ uint16_t reserved_1; /* MUST be set to 0. */
+} __packed;
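
Summing the fields above gives exactly one 64-byte request-queue slot; a compile-time assertion one could place alongside the definition (sketch, not part of the patch):

#include <linux/bug.h>

static inline void ctio_crc2_to_fw_size_check(void)
{
	/* 13 dword-sized field groups + one 8-byte address = 64 bytes */
	BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
}
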
+
+/* CTIO Type CRC_x Status IOCB */
+struct ctio_crc_from_fw {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t status;
+ uint16_t timeout; /* Command timeout. */
+ uint16_t dseg_count; /* Data segment count. */
+ uint32_t reserved1;
+ uint16_t state_flags;
+#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
+
+ uint32_t exchange_address; /* rcv exchange address */
+ uint16_t reserved2;
+ uint16_t flags;
+ uint32_t resid_xfer_length;
+ uint16_t ox_id;
+ uint8_t reserved3[12];
+ uint16_t runt_guard; /* reported runt blk guard */
+ uint8_t actual_dif[8];
+ uint8_t expected_dif[8];
+} __packed;
+
+/*
* ISP queue - ABTS received/response entries structure definition for 24xx.
*/
#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
@@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
+ void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -805,6 +870,12 @@ struct qla_tgt {
struct list_head tgt_list_entry;
};
+struct qla_tgt_sess_op {
+ struct scsi_qla_host *vha;
+ struct atio_from_isp atio;
+ struct work_struct work;
+};
+
/*
* Equivalent to IT Nexus (Initiator-Target)
*/
@@ -829,9 +900,9 @@ struct qla_tgt_sess {
};
struct qla_tgt_cmd {
+ struct se_cmd se_cmd;
struct qla_tgt_sess *sess;
int state;
- struct se_cmd se_cmd;
struct work_struct free_work;
struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
@@ -843,6 +914,7 @@ struct qla_tgt_cmd {
unsigned int free_sg:1;
unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
+ unsigned int ctx_dsd_alloced:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -857,6 +929,12 @@ struct qla_tgt_cmd {
struct scsi_qla_host *vha;
struct atio_from_isp atio;
+ /* t10dif */
+ struct scatterlist *prot_sg;
+ uint32_t prot_sg_cnt;
+ uint32_t blk_sz;
+ struct crc_context *ctx;
+
};
struct qla_tgt_sess_work_param {
@@ -901,6 +979,10 @@ struct qla_tgt_prm {
int sense_buffer_len;
int residual;
int add_status_pkt;
+ /* dif */
+ struct scatterlist *prot_sg;
+ uint16_t prot_seg_cnt;
+ uint16_t tot_dsds;
};
struct qla_tgt_srr_imm {
@@ -976,6 +1058,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
+extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index a804e9b744bb..cb9a0c4bc419 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -201,7 +201,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
ql_dbg(ql_dbg_misc, NULL, 0xd014,
"%s: @%x\n", __func__, offset);
}
- qla27xx_insert32(offset, buf, len);
qla27xx_read32(window, buf, len);
}
@@ -220,7 +219,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
- uint32_t base, uint offset, uint count, uint width, void *buf,
+ uint32_t addr, uint offset, uint count, uint width, void *buf,
ulong *len)
{
void *window = (void *)reg + offset;
@@ -229,14 +228,14 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
if (buf) {
ql_dbg(ql_dbg_misc, NULL, 0xd016,
"%s: base=%x offset=%x count=%x width=%x\n",
- __func__, base, offset, count, width);
+ __func__, addr, offset, count, width);
}
- qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
+ qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
while (count--) {
- qla27xx_insert32(base, buf, len);
+ qla27xx_insert32(addr, buf, len);
readn(window, buf, len);
window += width;
- base += width;
+ addr++;
}
}
@@ -336,7 +335,8 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd204,
"%s: rdpci [%lx]\n", __func__, *len);
- qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
+ qla27xx_insert32(ent->t260.pci_offset, buf, len);
+ qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
return false;
}
@@ -349,7 +349,7 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd205,
"%s: wrpci [%lx]\n", __func__, *len);
- qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
+ qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
return false;
}
@@ -392,9 +392,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
goto done;
}
- if (end < start) {
+ if (end < start || end == 0) {
ql_dbg(ql_dbg_misc, vha, 0xd023,
- "%s: bad range (start=%x end=%x)\n", __func__,
+ "%s: unusable range (start=%x end=%x)\n", __func__,
ent->t262.end_addr, ent->t262.start_addr);
qla27xx_skip_entry(ent, buf);
goto done;
@@ -452,17 +452,15 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd025,
"%s: unsupported atio queue\n", __func__);
qla27xx_skip_entry(ent, buf);
- goto done;
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
"%s: unknown queue %u\n", __func__, ent->t263.queue_type);
qla27xx_skip_entry(ent, buf);
- goto done;
}
if (buf)
ent->t263.num_queues = count;
-done:
+
return false;
}
@@ -503,7 +501,7 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd209,
"%s: pause risc [%lx]\n", __func__, *len);
if (buf)
- qla24xx_pause_risc(reg);
+ qla24xx_pause_risc(reg, vha->hw);
return false;
}
@@ -590,7 +588,6 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
- void *window = (void *)reg + 0xc4;
ulong dwords = ent->t270.count;
ulong addr = ent->t270.addr;
@@ -599,10 +596,9 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
while (dwords--) {
qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
- qla27xx_read_reg(reg, 0xc4, buf, len);
qla27xx_insert32(addr, buf, len);
- qla27xx_read32(window, buf, len);
- addr++;
+ qla27xx_read_reg(reg, 0xc4, buf, len);
+ addr += sizeof(uint32_t);
}
return false;
@@ -614,12 +610,12 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
{
struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
ulong addr = ent->t271.addr;
+ ulong data = ent->t271.data;
ql_dbg(ql_dbg_misc, vha, 0xd20f,
"%s: wrremreg [%lx]\n", __func__, *len);
qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
- qla27xx_read_reg(reg, 0xc4, buf, len);
- qla27xx_insert32(addr, buf, len);
+ qla27xx_write_reg(reg, 0xc4, data, buf);
qla27xx_write_reg(reg, 0xc0, addr, buf);
return false;
@@ -662,9 +658,59 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
"%s: failed pcicfg read at %lx\n", __func__, addr);
qla27xx_insert32(addr, buf, len);
qla27xx_insert32(value, buf, len);
- addr += 4;
+ addr += sizeof(uint32_t);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ uint count = 0;
+ uint i;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd212,
+ "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
+ if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
+ if (req || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(req && req->out_ptr ?
+ *req->out_ptr : 0, buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+ if (rsp || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(rsp && rsp->in_ptr ?
+ *rsp->in_ptr : 0, buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+ ql_dbg(ql_dbg_misc, vha, 0xd02e,
+ "%s: unsupported atio queue\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd02f,
+ "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
+ qla27xx_skip_entry(ent, buf);
}
+ if (buf)
+ ent->t274.num_queues = count;
+
+ if (!count)
+ qla27xx_skip_entry(ent, buf);
+
return false;
}
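
Each shadow-queue record emitted by t274 is a fixed 8 bytes; one reading of the buffer layout produced by the insert calls above (struct and field names hypothetical):

/*
 * struct t274_shadow_record {
 *	uint16_t queue_id;	// qla27xx_insert16(i, ...)
 *	uint16_t length;	// qla27xx_insert16(1, ...): one dword follows
 *	uint32_t shadow;	// *out_ptr / *in_ptr, or 0 if unset
 * };
 */
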
@@ -709,6 +755,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
{ ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
{ ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
{ ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
+ { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
{ -1 , qla27xx_fwdt_entry_other }
};
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index c9d2fff4d964..1967424c8e64 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -52,6 +52,7 @@ struct __packed qla27xx_fwdt_template {
#define ENTRY_TYPE_WRREMREG 271
#define ENTRY_TYPE_RDREMRAM 272
#define ENTRY_TYPE_PCICFG 273
+#define ENTRY_TYPE_GET_SHADOW 274
#define CAPTURE_FLAG_PHYS_ONLY BIT_0
#define CAPTURE_FLAG_PHYS_VIRT BIT_1
@@ -109,12 +110,12 @@ struct __packed qla27xx_fwdt_entry {
} t259;
struct __packed {
- uint8_t pci_addr;
+ uint8_t pci_offset;
uint8_t reserved[3];
} t260;
struct __packed {
- uint8_t pci_addr;
+ uint8_t pci_offset;
uint8_t reserved[3];
uint32_t write_data;
} t261;
@@ -186,6 +187,12 @@ struct __packed qla27xx_fwdt_entry {
uint32_t addr;
uint32_t count;
} t273;
+
+ struct __packed {
+ uint32_t num_queues;
+ uint8_t queue_type;
+ uint8_t reserved[3];
+ } t274;
};
};
@@ -202,4 +209,8 @@ struct __packed qla27xx_fwdt_entry {
#define T268_BUF_TYPE_EXCH_BUFOFF 2
#define T268_BUF_TYPE_EXTD_LOGIN 3
+#define T274_QUEUE_TYPE_REQ_SHAD 1
+#define T274_QUEUE_TYPE_RSP_SHAD 2
+#define T274_QUEUE_TYPE_ATIO_SHAD 3
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e36b94712544..4d2c98cbec4f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,13 +1,13 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.02-k"
+#define QLA2XXX_VERSION "8.07.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 68fb66fdb757..e2beab962096 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
+ cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+ cmd->prot_sg = se_cmd->t_prot_sg;
+ cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
+ se_cmd->pi_err = 0;
+
/*
* qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
@@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
- transport_generic_request_failure(&cmd->se_cmd,
- TCM_CHECK_CONDITION_ABORT_CMD);
+ if (cmd->se_cmd.pi_err)
+ transport_generic_request_failure(&cmd->se_cmd,
+ cmd->se_cmd.pi_err);
+ else
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+
return;
}
@@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
+static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ /* Take an extra kref to prevent the cmd from being freed too
+ * early. We need to wait for the SCSI status/check condition
+ * generated by transport_generic_request_failure() to finish
+ * being sent.
+ */
+ kref_get(&cmd->se_cmd.cmd_kref);
+ transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
/*
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
@@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
+ cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+ cmd->prot_sg = se_cmd->t_prot_sg;
+ cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
+ se_cmd->pi_err = 0;
+
/*
* Now queue completed DATA_IN the qla2xxx LLD and response ring
*/
@@ -1465,6 +1501,8 @@ static int tcm_qla2xxx_check_initiator_node_acl(
struct qla_tgt_sess *sess = qla_tgt_sess;
unsigned char port_name[36];
unsigned long flags;
+ int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+ TCM_QLA2XXX_DEFAULT_TAGS;
lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
@@ -1482,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl(
}
se_tpg = &tpg->se_tpg;
- se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ se_sess = transport_init_session_tags(num_tags,
+ sizeof(struct qla_tgt_cmd),
+ TARGET_PROT_NORMAL);
if (IS_ERR(se_sess)) {
pr_err("Unable to initialize struct se_session\n");
return PTR_ERR(se_sess);
@@ -1600,6 +1640,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
+ .handle_dif_err = tcm_qla2xxx_handle_dif_err,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 33aaac8c7d59..10c002145648 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,6 +4,11 @@
#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
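
Sizing note: transport_init_session_tags() preallocates num_tags commands per session, so the default above trades memory for never failing command allocation at I/O time. A rough bound, assuming sizeof(struct qla_tgt_cmd) on the order of 2 KiB:

/*
 * 2088 tags * ~2 KiB per struct qla_tgt_cmd  ~=  ~4 MiB per session
 * (ha->fw_xcb_count, when the firmware reports it, replaces 2088)
 */
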
#include "qla_target.h"
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 2eba35365920..556c1525f881 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
qla4_83xx_flash_unlock(ha);
}
-/**
- * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
- * @ha: Pointer to adapter structure
- * @addr: Flash address to write to
- * @data: Data to be written
- * @count: word_count to be written
- *
- * Return: On success return QLA_SUCCESS
- * On error return QLA_ERROR
- **/
-int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
- uint32_t *data, uint32_t count)
-{
- int i, j;
- uint32_t agt_ctrl;
- unsigned long flags;
- int ret_val = QLA_SUCCESS;
-
- /* Only 128-bit aligned access */
- if (addr & 0xF) {
- ret_val = QLA_ERROR;
- goto exit_ms_mem_write;
- }
-
- write_lock_irqsave(&ha->hw_lock, flags);
-
- /* Write address */
- ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
- if (ret_val == QLA_ERROR) {
- ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
- __func__);
- goto exit_ms_mem_write_unlock;
- }
-
- for (i = 0; i < count; i++, addr += 16) {
- if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
- QLA8XXX_ADDR_QDR_NET_MAX)) ||
- (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
- QLA8XXX_ADDR_DDR_NET_MAX)))) {
- ret_val = QLA_ERROR;
- goto exit_ms_mem_write_unlock;
- }
-
- ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
- addr);
- /* Write data */
- ret_val |= qla4_83xx_wr_reg_indirect(ha,
- MD_MIU_TEST_AGT_WRDATA_LO,
- *data++);
- ret_val |= qla4_83xx_wr_reg_indirect(ha,
- MD_MIU_TEST_AGT_WRDATA_HI,
- *data++);
- ret_val |= qla4_83xx_wr_reg_indirect(ha,
- MD_MIU_TEST_AGT_WRDATA_ULO,
- *data++);
- ret_val |= qla4_83xx_wr_reg_indirect(ha,
- MD_MIU_TEST_AGT_WRDATA_UHI,
- *data++);
- if (ret_val == QLA_ERROR) {
- ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
- __func__);
- goto exit_ms_mem_write_unlock;
- }
-
- /* Check write status */
- ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
- MIU_TA_CTL_WRITE_ENABLE);
- ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
- MIU_TA_CTL_WRITE_START);
- if (ret_val == QLA_ERROR) {
- ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
- __func__);
- goto exit_ms_mem_write_unlock;
- }
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- ret_val = qla4_83xx_rd_reg_indirect(ha,
- MD_MIU_TEST_AGT_CTRL,
- &agt_ctrl);
- if (ret_val == QLA_ERROR) {
- ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
- __func__);
- goto exit_ms_mem_write_unlock;
- }
- if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
- break;
- }
-
- /* Status check failed */
- if (j >= MAX_CTL_CHECK) {
- printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
- __func__);
- ret_val = QLA_ERROR;
- goto exit_ms_mem_write_unlock;
- }
- }
-
-exit_ms_mem_write_unlock:
- write_unlock_irqrestore(&ha->hw_lock, flags);
-
-exit_ms_mem_write:
- return ret_val;
-}
-
#define INTENT_TO_RECOVER 0x01
#define PROCEED_TO_RECOVER 0x02
@@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
__func__));
/* 128 bit/16 byte write to MS memory */
- ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+ ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
count);
if (ret_val == QLA_ERROR) {
ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
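
The 128-bit MS memory writer stops being 83xx-specific: its body moves to ql4_nx.c (further down in this merge) as qla4_8xxx_ms_mem_write_128b(), with every register access routed through the per-ISP ops table instead of the qla4_83xx_* accessors, and the bootloader copy path above switches to the renamed helper. The indirection that makes the shared helper chip-agnostic has this shape (field names assumed from the call sites in this series):

    /* Sketch: per-ISP indirect register accessors. The moved helper
     * calls ha->isp_ops->{rd,wr}_reg_indirect() so 82xx and 83xx/84xx
     * parts can each supply their own implementation. */
    struct isp_operations {
            /* ... */
            int (*rd_reg_indirect)(struct scsi_qla_host *ha,
                                   uint32_t addr, uint32_t *val);
            int (*wr_reg_indirect)(struct scsi_qla_host *ha,
                                   uint32_t addr, uint32_t val);
            /* ... */
    };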
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index a0de6e25ea5a..775fdf9fcc87 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd {
uint32_t rsvd_1;
};
+struct qla8044_minidump_entry_rddfe {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t value;
+ uint8_t stride;
+ uint8_t stride2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+ struct qla8xxx_minidump_entry_hdr h;
+
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint8_t stride_1;
+ uint8_t stride_2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t value_2;
+ uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
/* RDMUX2 Entry */
struct qla83xx_minidump_entry_rdmux2 {
struct qla8xxx_minidump_entry_hdr h;
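
The three new entry layouts describe records read straight out of the firmware's minidump template, which is why every field has an explicit width and the structs are __packed. Compile-time guards of the following shape would catch accidental layout drift; the payload sizes are derived from the fields above, while the size of the common header is an assumption, not stated in this patch:

    /* Sketch: layout guards (BUILD_BUG_ON must sit inside a function).
     * rddfe and pollwr carry 32 payload bytes, rdmdio carries 36. */
    BUILD_BUG_ON(sizeof(struct qla8044_minidump_entry_rddfe) !=
                 sizeof(struct qla8xxx_minidump_entry_hdr) + 32);
    BUILD_BUG_ON(sizeof(struct qla8044_minidump_entry_rdmdio) !=
                 sizeof(struct qla8xxx_minidump_entry_hdr) + 36);
    BUILD_BUG_ON(sizeof(struct qla8044_minidump_entry_pollwr) !=
                 sizeof(struct qla8xxx_minidump_entry_hdr) + 32);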
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 73a502288bde..8f6d0fb2cd80 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -601,6 +601,7 @@ struct scsi_qla_host {
#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/
#define DPC_POST_IDC_ACK 23 /* 0x00800000 */
#define DPC_RESTORE_ACB 24 /* 0x01000000 */
+#define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 209853ce0bbc..699575efc9ba 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1415,6 +1415,9 @@ struct ql_iscsi_stats {
#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16
#define QLA83XX_SS_OCM_WNDREG_INDEX 3
#define QLA83XX_SS_PCI_INDEX 0
+#define QLA8022_TEMPLATE_CAP_OFFSET 172
+#define QLA83XX_TEMPLATE_CAP_OFFSET 268
+#define QLA80XX_TEMPLATE_RESERVED_BITS 16
struct qla4_8xxx_minidump_template_hdr {
uint32_t entry_type;
@@ -1434,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr {
uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
+ uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];
};
#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index b1a19cd8d5b2..5f58b451327e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -274,13 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
uint32_t acb_type, uint32_t len);
int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
-int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
uint64_t addr, uint32_t *data, uint32_t count);
uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
int qla4_83xx_is_detached(struct scsi_qla_host *ha);
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 28fbece7e08f..6f12f859b11d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
return ipv4_wait|ipv6_wait;
}
+static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha,
+ struct qla4_8xxx_minidump_template_hdr *md_hdr)
+{
+ int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET :
+ QLA83XX_TEMPLATE_CAP_OFFSET;
+ int rval = 1;
+ uint32_t *cap_offset;
+
+ cap_offset = (uint32_t *)((char *)md_hdr + offset);
+
+ if (!(le32_to_cpu(*cap_offset) & BIT_0)) {
+ ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n",
+ *cap_offset);
+ rval = 0;
+ }
+
+ return rval;
+}
+
/**
* qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
* @ha: pointer to host adapter structure.
@@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
void *md_tmp;
dma_addr_t md_tmp_dma;
struct qla4_8xxx_minidump_template_hdr *md_hdr;
+ int dma_capable;
if (ha->fw_dump) {
ql4_printk(KERN_WARNING, ha,
@@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+ dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr);
+
capture_debug_level = md_hdr->capture_debug_level;
/* Get capture mask based on module loadtime setting. */
- if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+ if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
+ (ql4xmdcapmask == 0xFF && dma_capable)) {
ha->fw_dump_capture_mask = ql4xmdcapmask;
- else
+ } else {
+ if (ql4xmdcapmask == 0xFF)
+ ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");
ha->fw_dump_capture_mask = capture_debug_level;
+ }
md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
@@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
if (status == QLA_SUCCESS) {
if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
qla4xxx_get_crash_record(ha);
+
+ qla4xxx_init_rings(ha);
} else {
DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
ha->host_no, __func__));
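
qla4_80xx_is_minidump_dma_capable() peeks at a capabilities word placed at a chip-specific byte offset into the template header (172 on ISP8022, 268 on ISP83xx, per the ql4_fw.h hunk above) and tests BIT_0 for PEX DMA support. The capture-mask selection then condenses to:

    /* Condensed restatement of the logic added above: 0x3..0x7F are
     * honoured unconditionally, 0xFF only when the template advertises
     * PEX DMA, and everything else falls back to the firmware's
     * capture_debug_level. */
    if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
        (ql4xmdcapmask == 0xFF && dma_capable))
            ha->fw_dump_capture_mask = ql4xmdcapmask;
    else
            ha->fw_dump_capture_mask = capture_debug_level;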
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index b1925d195f41..081b6b78d2c6 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1526,7 +1526,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
- int ret;
+ int ret = 0;
int rval = QLA_ERROR;
if (is_qla40XX(ha))
@@ -1580,15 +1580,13 @@ try_msi:
}
}
- /*
- * Prevent interrupts from falling back to INTx mode in cases where
- * interrupts cannot get acquired through MSI-X or MSI mode.
- */
+try_intx:
if (is_qla8022(ha)) {
- ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
+ ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
+ __func__);
goto irq_not_attached;
}
-try_intx:
+
/* Trying INTx */
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
IRQF_SHARED, DRIVER_NAME, ha);
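
Moving the try_intx label above the is_qla8022() check changes the interrupt fallback: ISP82xx, whose legacy INTx mode is unsupported, now bails out before the INTx attempt rather than emitting a vague warning after MSI-X/MSI setup failed. In outline (msix_setup()/msi_setup() stand in for the branches elided from this hunk):

    /* Sketch of the resulting ladder. */
    if (msix_setup(ha) == 0)                /* MSI-X first */
            goto irq_attached;
    if (msi_setup(ha) == 0)                 /* then MSI */
            goto irq_attached;
    try_intx:
    if (is_qla8022(ha))                     /* 82xx: no legacy INTx */
            goto irq_not_attached;
    ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
                      IRQF_SHARED, DRIVER_NAME, ha);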
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 0a6b782d6fdb..0a3312c6dd6d 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2381,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
__func__);
rval = QLA_ERROR;
- goto exit_config_acb;
+ goto exit_free_acb;
}
memcpy(ha->saved_acb, acb, acb_len);
break;
@@ -2395,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
}
memcpy(acb, ha->saved_acb, acb_len);
- kfree(ha->saved_acb);
- ha->saved_acb = NULL;
rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
if (rval != QLA_SUCCESS)
@@ -2412,6 +2410,10 @@ exit_free_acb:
dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
acb_dma);
exit_config_acb:
+ if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
+ kfree(ha->saved_acb);
+ ha->saved_acb = NULL;
+ }
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s %s\n", __func__,
rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
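
Two lifetime fixes for ha->saved_acb: when allocating the save buffer fails, the exit now goes through exit_free_acb so the DMA-coherent ACB buffer is released rather than leaked, and the restore path no longer frees saved_acb inline before qla4xxx_set_acb() runs — the free happens exactly once, in the common exit label, on every exit from the ACB_CONFIG_SET path. The resulting tail of the function:

    /* Condensed exit path after this change. */
    exit_free_acb:
            dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
                              acb, acb_dma);
    exit_config_acb:
            if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
                    kfree(ha->saved_acb);
                    ha->saved_acb = NULL;
            }
            /* debug print of SUCCEEDED/FAILED, then return rval */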
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 63328c812b70..9dbdb4be2d8f 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -14,6 +14,7 @@
#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#define TIMEOUT_100_MS 100
#define MASK(n) DMA_BIT_MASK(n)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
@@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
return 0;
}
+/**
+ * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @data: Data to be written
+ * @count: word_count to be written
+ *
+ * Return: On success return QLA_SUCCESS
+ * On error return QLA_ERROR
+ **/
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+ uint32_t *data, uint32_t count)
+{
+ int i, j;
+ uint32_t agt_ctrl;
+ unsigned long flags;
+ int ret_val = QLA_SUCCESS;
+
+ /* Only 128-bit aligned access */
+ if (addr & 0xF) {
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write;
+ }
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /* Write address */
+ ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+ QLA8XXX_ADDR_QDR_NET_MAX)) ||
+ (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)))) {
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write_unlock;
+ }
+
+ ret_val = ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_ADDR_LO,
+ addr);
+ /* Write data */
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_LO,
+ *data++);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_HI,
+ *data++);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_ULO,
+ *data++);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_UHI,
+ *data++);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ /* Check write status */
+ ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_ENABLE);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_START);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ret_val = ha->isp_ops->rd_reg_indirect(ha,
+ MD_MIU_TEST_AGT_CTRL,
+ &agt_ctrl);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+ if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failed */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write_unlock;
+ }
+ }
+
+exit_ms_mem_write_unlock:
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+ return ret_val;
+}
+
static int
qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{
@@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
qla4_82xx_rom_unlock(ha);
}
+static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
+ uint32_t addr1, uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t rval = QLA_SUCCESS;
+ uint32_t temp;
+
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
+ return QLA_ERROR;
+ }
+ } while (1);
+
+ return rval;
+}
+
+uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+ uint32_t addr3, uint32_t mask, uint32_t addr,
+ uint32_t *data_ptr)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t temp;
+ uint32_t data;
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_rd_reg;
+
+ temp = (0x40000000 | addr);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_rd_reg;
+
+ ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
+ *data_ptr = data;
+
+exit_ipmdio_rd_reg:
+ return rval;
+}
+
+
+static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
+ uint32_t addr1,
+ uint32_t addr2,
+ uint32_t addr3,
+ uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t temp;
+ uint32_t rval = QLA_SUCCESS;
+
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
+ if ((temp & 0x1) != 1)
+ break;
+ if (time_after_eq(jiffies, timeout)) {
+ ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
+ return QLA_ERROR;
+ }
+ } while (1);
+
+ return rval;
+}
+
+static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
+ uint32_t addr1, uint32_t addr3,
+ uint32_t mask, uint32_t addr,
+ uint32_t value)
+{
+ int rval = QLA_SUCCESS;
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_wr_reg;
+
+ ha->isp_ops->wr_reg_indirect(ha, addr3, value);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_wr_reg;
+
+exit_ipmdio_wr_reg:
+ return rval;
+}
+
static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
@@ -1822,7 +2024,7 @@ error_exit:
return rval;
}
-static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
@@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
dma_desc.cmd.read_data_size = size;
/* Prepare: Write pex-dma descriptor to MS memory. */
- rval = qla4_83xx_ms_mem_write_128b(ha,
+ rval = qla4_8xxx_ms_mem_write_128b(ha,
(uint64_t)m_hdr->desc_card_addr,
(uint32_t *)&dma_desc,
(sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
- if (rval == -1) {
+ if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha,
"%s: Error writing rdmem-dma-init to MS !!!\n",
__func__);
@@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
uint32_t *data_ptr = *d_ptr;
int rval = QLA_SUCCESS;
- if (is_qla8032(ha) || is_qla8042(ha)) {
- rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
- &data_ptr);
- if (rval != QLA_SUCCESS) {
- rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
- &data_ptr);
- }
- } else {
+ rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
&data_ptr);
- }
*d_ptr = data_ptr;
return rval;
}
@@ -2440,6 +2635,227 @@ exit_process_pollrd:
return rval;
}
+static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int loop_cnt;
+ uint32_t addr1, addr2, value, data, temp, wrval;
+ uint8_t stride, stride2;
+ uint16_t count;
+ uint32_t poll, mask, data_size, modify_mask;
+ uint32_t wait_count = 0;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla8044_minidump_entry_rddfe *rddfe;
+ uint32_t rval = QLA_SUCCESS;
+
+ rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
+ addr1 = le32_to_cpu(rddfe->addr_1);
+ value = le32_to_cpu(rddfe->value);
+ stride = le32_to_cpu(rddfe->stride);
+ stride2 = le32_to_cpu(rddfe->stride2);
+ count = le32_to_cpu(rddfe->count);
+
+ poll = le32_to_cpu(rddfe->poll);
+ mask = le32_to_cpu(rddfe->mask);
+ modify_mask = le32_to_cpu(rddfe->modify_mask);
+ data_size = le32_to_cpu(rddfe->data_size);
+
+ addr2 = addr1 + stride;
+
+ for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+ ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_process_rddfe;
+ } else {
+ ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
+ temp = temp & modify_mask;
+ temp = (temp | ((loop_cnt << 16) | loop_cnt));
+ wrval = ((temp << 16) | temp);
+
+ ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, value);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_rddfe;
+ }
+
+ ha->isp_ops->wr_reg_indirect(ha, addr1,
+ ((0x40000000 | value) +
+ stride2));
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_rddfe;
+ }
+
+ ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
+
+ *data_ptr++ = cpu_to_le32(wrval);
+ *data_ptr++ = cpu_to_le32(data);
+ }
+ }
+
+ *d_ptr = data_ptr;
+exit_process_rddfe:
+ return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t addr1, addr2, value1, value2, data, selval;
+ uint8_t stride1, stride2;
+ uint32_t addr3, addr4, addr5, addr6, addr7;
+ uint16_t count, loop_cnt;
+ uint32_t poll, mask;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+ rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
+ addr1 = le32_to_cpu(rdmdio->addr_1);
+ addr2 = le32_to_cpu(rdmdio->addr_2);
+ value1 = le32_to_cpu(rdmdio->value_1);
+ stride1 = le32_to_cpu(rdmdio->stride_1);
+ stride2 = le32_to_cpu(rdmdio->stride_2);
+ count = le32_to_cpu(rdmdio->count);
+
+ poll = le32_to_cpu(rdmdio->poll);
+ mask = le32_to_cpu(rdmdio->mask);
+ value2 = le32_to_cpu(rdmdio->value_2);
+
+ addr3 = addr1 + stride1;
+
+ for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+ rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+ addr3, mask);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr4 = addr2 - stride1;
+ rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
+ value2);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr5 = addr2 - (2 * stride1);
+ rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
+ value1);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr6 = addr2 - (3 * stride1);
+ rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
+ addr6, 0x2);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+ addr3, mask);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr7 = addr2 - (4 * stride1);
+ rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
+ mask, addr7, &data);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ selval = (value2 << 18) | (value1 << 2) | 2;
+
+ stride2 = le32_to_cpu(rdmdio->stride_2);
+ *data_ptr++ = cpu_to_le32(selval);
+ *data_ptr++ = cpu_to_le32(data);
+
+ value1 = value1 + stride2;
+ *d_ptr = data_ptr;
+ }
+
+exit_process_rdmdio:
+ return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+ struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+ uint32_t wait_count = 0;
+ uint32_t rval = QLA_SUCCESS;
+
+ pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+ addr1 = le32_to_cpu(pollwr_hdr->addr_1);
+ addr2 = le32_to_cpu(pollwr_hdr->addr_2);
+ value1 = le32_to_cpu(pollwr_hdr->value_1);
+ value2 = le32_to_cpu(pollwr_hdr->value_2);
+
+ poll = le32_to_cpu(pollwr_hdr->poll);
+ mask = le32_to_cpu(pollwr_hdr->mask);
+
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollwr;
+ }
+
+ ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+ wait_count++;
+ }
+
+exit_process_pollwr:
+ return rval;
+}
+
static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
@@ -2753,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
if (rval != QLA_SUCCESS)
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
+ case QLA8044_RDDFE:
+ rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8044_RDMDIO:
+ rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8044_POLLWR:
+ rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
case QLA8XXX_RDNOP:
default:
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
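
Beyond relocating qla4_8xxx_ms_mem_write_128b() and generalizing the PEX-DMA reader (all 8xxx parts now try DMA first and fall back to indirect register reads in qla4_8xxx_minidump_process_rdmem()), one subtle fix hides in the descriptor-write check: the status was compared against -1, but this driver's convention is QLA_SUCCESS (0) / QLA_ERROR (1) — values assumed from ql4_def.h — so the old test could never fire and a failed descriptor write went unnoticed. The corrected check uses the driver-wide idiom:

    /* Status idiom (values assumed from ql4_def.h): an '== -1' test
     * can never match QLA_ERROR, so failures were silently ignored. */
    rval = qla4_8xxx_ms_mem_write_128b(ha,
                    (uint64_t)m_hdr->desc_card_addr,
                    (uint32_t *)&dma_desc,
                    sizeof(struct qla4_83xx_pex_dma_descriptor) / 16);
    if (rval != QLA_SUCCESS)
            goto error_exit;        /* label assumed; the hunk logs and bails */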
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 14500a0f62cc..337d9fcf6417 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -858,6 +858,9 @@ struct crb_addr_pair {
#define QLA83XX_POLLRD 35
#define QLA83XX_RDMUX2 36
#define QLA83XX_POLLRDMWR 37
+#define QLA8044_RDDFE 38
+#define QLA8044_RDMDIO 39
+#define QLA8044_POLLWR 40
#define QLA8XXX_RDROM 71
#define QLA8XXX_RDMEM 72
#define QLA8XXX_CNTRL 98
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 459b9f7186fd..320206376206 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,
" Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
-int ql4xmdcapmask = 0x1F;
+int ql4xmdcapmask = 0;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
" Set the Minidump driver capture mask level.\n"
- "\t\t Default is 0x1F.\n"
- "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+ "\t\t Default is 0 (firmware default capture mask)\n"
+ "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
@@ -1742,6 +1742,9 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
struct sockaddr *dst_addr;
struct scsi_qla_host *ha;
+ if (!qla_ep)
+ return -ENOTCONN;
+
ha = to_qla_host(qla_ep->host);
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
ha->host_no));
@@ -1749,9 +1752,6 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
switch (param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
- if (!qla_ep)
- return -ENOTCONN;
-
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
if (!dst_addr)
return -ENOTCONN;
@@ -2879,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
struct iscsi_conn *conn;
struct qla_conn *qla_conn;
struct sockaddr *dst_addr;
- int len = 0;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
@@ -2893,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
-
- return len;
-
}
int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
@@ -3569,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
if (test_bit(OPT_IPV6_DEVICE, &options)) {
conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
- conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+ conn->link_local_ipv6_addr = kmemdup(
+ fw_ddb_entry->link_local_ipv6_addr,
+ IPv6_ADDR_LEN, GFP_KERNEL);
if (!conn->link_local_ipv6_addr) {
rc = -ENOMEM;
goto exit_copy;
}
-
- memcpy(conn->link_local_ipv6_addr,
- fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
} else {
conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
}
@@ -4565,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+ test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
@@ -4862,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
ha->host_no, __func__));
status = ha->isp_ops->reset_firmware(ha);
if (status == QLA_SUCCESS) {
- if (!test_bit(AF_FW_RECOVERY, &ha->flags))
- qla4xxx_cmd_wait(ha);
-
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -5432,6 +5425,11 @@ dpc_post_reset_ha:
qla4xxx_relogin_all_devices(ha);
}
}
+ if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
+ if (qla4xxx_sysfs_ddb_export(ha))
+ ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
+ __func__);
+ }
}
/**
@@ -8409,7 +8407,7 @@ exit_ddb_del:
*
* Export the firmware DDB for all send targets and normal targets to sysfs.
**/
-static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
{
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
@@ -8847,11 +8845,8 @@ skip_retry_init:
ql4_printk(KERN_ERR, ha,
"%s: No iSCSI boot target configured\n", __func__);
- if (qla4xxx_sysfs_ddb_export(ha))
- ql4_printk(KERN_ERR, ha,
- "%s: Error exporting ddb to sysfs\n", __func__);
-
- /* Perform the build ddb list and login to each */
+ set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
+ /* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
qla4xxx_wait_login_resp_boot_tgt(ha);
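
Three things happen in ql4_os.c: the minidump capture-mask default changes from 0x1F to 0 (meaning "use the firmware's own default mask") with 0xFF newly allowed, the NULL-endpoint check in qla4xxx_get_ep_param() moves before the first dereference of qla_ep, and the boot-time sysfs DDB export is deferred from the probe path to the DPC worker via a new flag bit. The deferral pattern, condensed from the hunks above:

    /* Producer (probe path): just mark the work. */
    set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);

    /* Consumer (DPC worker): perform the export outside probe. */
    if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
            if (qla4xxx_sysfs_ddb_export(ha))
                    ql4_printk(KERN_ERR, ha,
                               "%s: Error exporting ddb to sysfs\n",
                               __func__);
    }

The kzalloc()+memcpy() pair for the link-local IPv6 address also collapses into kmemdup(), the idiomatic form.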
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c6ba0a6b8458..f11eaa773339 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.04.00-k4"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f3e9cc038d1d..1328a2621070 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -130,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_OPT_DIF_ERR 32
#define SCSI_DEBUG_OPT_DIX_ERR 64
#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
+#define SCSI_DEBUG_OPT_SHORT_TRANSFER 256
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -3583,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
int inj_transport = 0;
int inj_dif = 0;
int inj_dix = 0;
+ int inj_short = 0;
int delay_override = 0;
int unmap = 0;
@@ -3628,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
inj_dif = 1; /* to reads and writes below */
else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
inj_dix = 1; /* to reads and writes below */
+ else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
+ inj_short = 1;
}
if (devip->wlun) {
@@ -3744,6 +3748,10 @@ read:
if (scsi_debug_fake_rw)
break;
get_data_transfer_info(cmd, &lba, &num, &ei_lba);
+
+ if (inj_short)
+ num /= 2;
+
errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
if (inj_recovered && (0 == errsts)) {
mk_sense_buffer(devip, RECOVERED_ERROR,
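
The new SCSI_DEBUG_OPT_SHORT_TRANSFER bit (0x100) makes every_nth-selected READs transfer only half of the requested blocks, exercising the midlayer's residual handling; loading the module with, say, every_nth=100 opts=0x100 (equivalently opts=256) would short every hundredth read. The injection itself is a one-liner ahead of the read response:

    /* Halve the block count so the completion path sees a residual. */
    if (inj_short)
            num /= 2;

    errsts = resp_read(SCpnt, lba, num, devip, ei_lba);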
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f17aa7aa7879..7e957918f33f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work)
"aborting command %p\n", scmd));
rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
if (rtn == SUCCESS) {
- scmd->result |= DID_TIME_OUT << 16;
+ set_host_byte(scmd, DID_TIME_OUT);
if (scsi_host_eh_past_deadline(sdev->host)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
@@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
scmd_printk(KERN_WARNING, scmd,
"scmd %p terminate "
"aborted command\n", scmd));
- scmd->result |= DID_TIME_OUT << 16;
+ set_host_byte(scmd, DID_TIME_OUT);
scsi_finish_command(scmd);
}
}
@@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
else if (host->hostt->eh_timed_out)
rtn = host->hostt->eh_timed_out(scmd);
- if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort)
- if (scsi_abort_command(scmd) == SUCCESS)
+ if (rtn == BLK_EH_NOT_HANDLED) {
+ if (!host->hostt->no_async_abort &&
+ scsi_abort_command(scmd) == SUCCESS)
return BLK_EH_NOT_HANDLED;
- scmd->result |= DID_TIME_OUT << 16;
-
- if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
- !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
- rtn = BLK_EH_HANDLED;
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
+ rtn = BLK_EH_HANDLED;
+ }
return rtn;
}
@@ -1029,6 +1029,7 @@ retry:
rtn = NEEDS_RETRY;
} else {
timeleft = wait_for_completion_timeout(&done, timeout);
+ rtn = SUCCESS;
}
shost->eh_action = NULL;
@@ -1776,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
break;
case DID_ABORT:
if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
- scmd->result |= DID_TIME_OUT << 16;
+ set_host_byte(scmd, DID_TIME_OUT);
return SUCCESS;
}
case DID_NO_CONNECT:
@@ -1951,6 +1952,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
*/
req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ blk_rq_set_block_pc(req);
+
req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
req->cmd[1] = 0;
req->cmd[2] = 0;
@@ -1960,7 +1963,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->cmd_len = COMMAND_SIZE(req->cmd[0]);
- req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_QUIET;
req->timeout = 10 * HZ;
req->retries = 5;
@@ -2306,6 +2308,12 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
}
scmd = scsi_get_command(dev, GFP_KERNEL);
+ if (!scmd) {
+ rtn = FAILED;
+ put_device(&dev->sdev_gendev);
+ goto out_put_autopm_host;
+ }
+
blk_rq_init(NULL, &req);
scmd->request = &req;
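
The host byte DID_TIME_OUT is now installed with set_host_byte(), which replaces the host byte outright instead of OR-ing bits into whatever was there, so a stale host status can no longer combine into a nonsensical result. For reference, the accessor amounts to (shape recalled from <scsi/scsi_cmnd.h>; verify against the tree):

    static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
    {
            cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
    }

The same file also gains a blk_rq_set_block_pc() call for the door-lock request (see the sg.c note below), explicitly sets rtn to SUCCESS on the path where the eh command is actually waited on, and hardens scsi_reset_provider() against scsi_get_command() returning NULL.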
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a0c95cac91f0..f7e316368c99 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -195,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
if (!req)
return ret;
+ blk_rq_set_block_pc(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_WAIT))
@@ -206,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req->sense_len = 0;
req->retries = retries;
req->timeout = timeout;
- req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
/*
@@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
scsi_run_queue(sdev->request_queue);
}
-static void __scsi_release_buffers(struct scsi_cmnd *, int);
-
-/*
- * Function: scsi_end_request()
- *
- * Purpose: Post-processing of completed commands (usually invoked at end
- * of upper level post-processing and scsi_io_completion).
- *
- * Arguments: cmd - command that is complete.
- * error - 0 if I/O indicates success, < 0 for I/O error.
- * bytes - number of bytes of completed I/O
- * requeue - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns: cmd if requeue required, NULL otherwise.
- *
- * Notes: This is called for block device requests in order to
- * mark some number of sectors as complete.
- *
- * We are guaranteeing that the request queue will be goosed
- * at some point during this call.
- * Notes: If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
- int bytes, int requeue)
-{
- struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
-
- /*
- * If there are blocks left over at the end, set up the command
- * to queue the remainder of them.
- */
- if (blk_end_request(req, error, bytes)) {
- /* kill remainder if no retrys */
- if (error && scsi_noretry_cmd(cmd))
- blk_end_request_all(req, error);
- else {
- if (requeue) {
- /*
- * Bleah. Leftovers again. Stick the
- * leftovers in the front of the
- * queue, and goose the queue again.
- */
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- cmd = NULL;
- }
- return cmd;
- }
- }
-
- /*
- * This will goose the queue request function at the end, so we don't
- * need to worry about launching another command.
- */
- __scsi_release_buffers(cmd, 0);
- scsi_next_command(cmd);
- return NULL;
-}
-
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
unsigned int index;
@@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}
-static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
-{
-
- if (cmd->sdb.table.nents)
- scsi_free_sgtable(&cmd->sdb);
-
- memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
- if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
- struct scsi_data_buffer *bidi_sdb =
- cmd->request->next_rq->special;
- scsi_free_sgtable(bidi_sdb);
- kmem_cache_free(scsi_sdb_cache, bidi_sdb);
- cmd->request->next_rq->special = NULL;
- }
-
- if (scsi_prot_sg_count(cmd))
- scsi_free_sgtable(cmd->prot_sdb);
-}
-
/*
* Function: scsi_release_buffers()
*
- * Purpose: Completion processing for block device I/O requests.
+ * Purpose: Free resources allocated for a scsi_command.
*
* Arguments: cmd - command that we are bailing.
*
@@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
* Notes: In the event that an upper level driver rejects a
* command, we must release resources allocated during
* the __init_io() function. Primarily this would involve
- * the scatter-gather table, and potentially any bounce
- * buffers.
+ * the scatter-gather table.
*/
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
- __scsi_release_buffers(cmd, 1);
+ if (cmd->sdb.table.nents)
+ scsi_free_sgtable(&cmd->sdb);
+
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb);
}
EXPORT_SYMBOL(scsi_release_buffers);
+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
+{
+ struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
+
+ scsi_free_sgtable(bidi_sdb);
+ kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+ cmd->request->next_rq->special = NULL;
+}
+
/**
* __scsi_error_from_host_byte - translate SCSI error code into errno
* @cmd: SCSI command (unused)
@@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
*
* Returns: Nothing
*
- * Notes: This function is matched in terms of capabilities to
- * the function that created the scatter-gather list.
- * In other words, if there are no bounce buffers
- * (the normal case for most drivers), we don't need
- * the logic to deal with cleaning up afterwards.
- *
- * We must call scsi_end_request(). This will finish off
- * the specified number of sectors. If we are done, the
- * command block will be released and the queue function
- * will be goosed. If we are not done then we have to
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
* figure out what to do next:
*
* a) We can call scsi_requeue_command(). The request
@@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
* be used if we made forward progress, or if we want
* to switch from READ(10) to READ(6) for example.
*
- * b) We can call scsi_queue_insert(). The request will
+ * b) We can call __scsi_queue_insert(). The request will
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
@@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
req->next_rq->resid_len = scsi_in(cmd)->resid;
scsi_release_buffers(cmd);
+ scsi_release_bidi_buffers(cmd);
+
blk_end_request_all(req, 0);
scsi_next_command(cmd);
@@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
/*
- * A number of bytes were successfully read. If there
- * are leftovers and there is some kind of error
- * (result != 0), retry the rest.
+ * If we finished all bytes in the request we are done now.
*/
- if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
- return;
+ if (!blk_end_request(req, error, good_bytes))
+ goto next_command;
+
+ /*
+ * Kill remainder if no retrys.
+ */
+ if (error && scsi_noretry_cmd(cmd)) {
+ blk_end_request_all(req, error);
+ goto next_command;
+ }
+
+ /*
+ * If there had been no error, but we have leftover bytes in the
+ * request, just queue the command up again.
+ */
+ if (result == 0)
+ goto requeue;
error = __scsi_error_from_host_byte(cmd, result);
@@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
- scsi_release_buffers(cmd);
if (!(req->cmd_flags & REQ_QUIET)) {
if (description)
scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_print_sense("", cmd);
scsi_print_command(cmd);
}
- if (blk_end_request_err(req, error))
- scsi_requeue_command(q, cmd);
- else
- scsi_next_command(cmd);
- break;
+ if (!blk_end_request_err(req, error))
+ goto next_command;
+ /*FALLTHRU*/
case ACTION_REPREP:
+ requeue:
/* Unprep the request and put it back at the head of the queue.
* A new command will be prepared and issued.
*/
@@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
break;
}
+ return;
+
+next_command:
+ scsi_release_buffers(cmd);
+ scsi_next_command(cmd);
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -1128,15 +1071,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd;
- int ret = scsi_prep_state_check(sdev, req);
-
- if (ret != BLKPREP_OK)
- return ret;
-
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd))
- return BLKPREP_DEFER;
+ struct scsi_cmnd *cmd = req->special;
/*
* BLOCK_PC requests may transfer data, in which case they must
@@ -1179,15 +1114,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
*/
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd;
- int ret = scsi_prep_state_check(sdev, req);
-
- if (ret != BLKPREP_OK)
- return ret;
+ struct scsi_cmnd *cmd = req->special;
if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
&& sdev->scsi_dh_data->scsi_dh->prep_fn)) {
- ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+ int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
if (ret != BLKPREP_OK)
return ret;
}
@@ -1197,16 +1128,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
*/
BUG_ON(!req->nr_phys_segments);
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd))
- return BLKPREP_DEFER;
-
memset(cmd->cmnd, 0, BLK_MAX_CDB);
return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);
-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
int ret = BLKPREP_OK;
@@ -1258,9 +1186,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
}
return ret;
}
-EXPORT_SYMBOL(scsi_prep_state_check);
-int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
struct scsi_device *sdev = q->queuedata;
@@ -1291,18 +1219,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
return ret;
}
-EXPORT_SYMBOL(scsi_prep_return);
-int scsi_prep_fn(struct request_queue *q, struct request *req)
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
- int ret = BLKPREP_KILL;
+ struct scsi_cmnd *cmd;
+ int ret;
- if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+ ret = scsi_prep_state_check(sdev, req);
+ if (ret != BLKPREP_OK)
+ goto out;
+
+ cmd = scsi_get_cmd_from_req(sdev, req);
+ if (unlikely(!cmd)) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ if (req->cmd_type == REQ_TYPE_FS)
+ ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
+ else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
ret = scsi_setup_blk_pc_cmnd(sdev, req);
+ else
+ ret = BLKPREP_KILL;
+
+out:
return scsi_prep_return(q, req, ret);
}
-EXPORT_SYMBOL(scsi_prep_fn);
+
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+ if (req->cmd_type == REQ_TYPE_FS) {
+ struct scsi_cmnd *cmd = req->special;
+ struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+ if (drv->uninit_command)
+ drv->uninit_command(cmd);
+ }
+}
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
@@ -1723,6 +1677,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return NULL;
blk_queue_prep_rq(q, scsi_prep_fn);
+ blk_queue_unprep_rq(q, scsi_unprep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
blk_queue_rq_timed_out(q, scsi_times_out);
blk_queue_lld_busy(q, scsi_lld_busy);
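
The larger rework funnels all request preparation through one scsi_prep_fn(): the state check and command allocation happen once, then REQ_TYPE_FS requests are dispatched to the owning upper-level driver's new init_command hook while BLOCK_PC requests keep using scsi_setup_blk_pc_cmnd(); scsi_prep_state_check() and scsi_prep_return() become static and lose their exports, and the new unprep hook gives drivers a symmetric uninit_command for per-command teardown. The old scsi_end_request() is dissolved into explicit blk_end_request() handling plus the next_command/requeue labels in scsi_io_completion(). The hooks consumed here have this shape (surrounding fields elided; see include/scsi/scsi_driver.h):

    struct scsi_driver {
            /* ... */
            int (*init_command)(struct scsi_cmnd *);
            void (*uninit_command)(struct scsi_cmnd *);
            int (*done)(struct scsi_cmnd *);
            /* ... */
    };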
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f80908f74ca9..521f5838594b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work)
fc_flush_devloss(shost);
if (!cancel_delayed_work(&rport->dev_loss_work))
fc_flush_devloss(shost);
+ cancel_work_sync(&rport->scan_work);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 96af195224f2..6825eda1114a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -109,6 +109,8 @@ static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
+static int sd_init_command(struct scsi_cmnd *SCpnt);
+static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
@@ -503,6 +505,8 @@ static struct scsi_driver sd_template = {
.pm = &sd_pm_ops,
},
.rescan = sd_rescan,
+ .init_command = sd_init_command,
+ .uninit_command = sd_uninit_command,
.done = sd_done,
.eh_action = sd_eh_action,
};
@@ -836,9 +840,9 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
return scsi_setup_blk_pc_cmnd(sdp, rq);
}
-static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
- struct scsi_cmnd *SCpnt = rq->special;
+ struct request *rq = SCpnt->request;
if (rq->cmd_flags & REQ_DISCARD)
__free_page(rq->completion_data);
@@ -850,18 +854,10 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
}
}
-/**
- * sd_prep_fn - build a scsi (read or write) command from
- * information in the request structure.
- * @SCpnt: pointer to mid-level's per scsi command structure that
- * contains request and into which the scsi command is written
- *
- * Returns 1 if successful and 0 if error (or cannot be done now).
- **/
-static int sd_prep_fn(struct request_queue *q, struct request *rq)
+static int sd_init_command(struct scsi_cmnd *SCpnt)
{
- struct scsi_cmnd *SCpnt;
- struct scsi_device *sdp = q->queuedata;
+ struct request *rq = SCpnt->request;
+ struct scsi_device *sdp = SCpnt->device;
struct gendisk *disk = rq->rq_disk;
struct scsi_disk *sdkp;
sector_t block = blk_rq_pos(rq);
@@ -883,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
} else if (rq->cmd_flags & REQ_FLUSH) {
ret = scsi_setup_flush_cmnd(sdp, rq);
goto out;
- } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- ret = scsi_setup_blk_pc_cmnd(sdp, rq);
- goto out;
- } else if (rq->cmd_type != REQ_TYPE_FS) {
- ret = BLKPREP_KILL;
- goto out;
}
ret = scsi_setup_fs_cmnd(sdp, rq);
if (ret != BLKPREP_OK)
@@ -900,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* is used for a killable error condition */
ret = BLKPREP_KILL;
- SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
- "sd_prep_fn: block=%llu, "
- "count=%d\n",
- (unsigned long long)block,
- this_count));
+ SCSI_LOG_HLQUEUE(1,
+ scmd_printk(KERN_INFO, SCpnt,
+ "%s: block=%llu, count=%d\n",
+ __func__, (unsigned long long)block, this_count));
if (!sdp || !scsi_device_online(sdp) ||
block + blk_rq_sectors(rq) > get_capacity(disk)) {
@@ -1124,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
*/
ret = BLKPREP_OK;
out:
- return scsi_prep_return(q, rq, ret);
+ return ret;
}
/**
@@ -1686,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)
sshdr.ascq));
}
#endif
+ sdkp->medium_access_timed_out = 0;
+
if (driver_byte(result) != DRIVER_SENSE &&
(!sense_valid || sense_deferred))
goto out;
- sdkp->medium_access_timed_out = 0;
-
switch (sshdr.sense_key) {
case HARDWARE_ERROR:
case MEDIUM_ERROR:
@@ -2452,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
- if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ if (sdp->broken_fua) {
+ sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
+ sdkp->DPOFUA = 0;
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
@@ -2875,9 +2867,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_revalidate_disk(gd);
- blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
- blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
-
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
@@ -3025,8 +3014,6 @@ static int sd_remove(struct device *dev)
async_synchronize_full_domain(&scsi_sd_pm_domain);
async_synchronize_full_domain(&scsi_sd_probe_domain);
- blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
- blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
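
sd's conversion follows directly: sd_prep_fn()/sd_unprep_fn() become sd_init_command()/sd_uninit_command() registered in sd_template, the BLOCK_PC and non-FS branches disappear (the midlayer dispatches those now), and the per-queue prep/unprep hooks are no longer installed or torn down per disk. Two independent fixes ride along: a broken_fua quirk that forcibly clears DPOFUA, and a reordering in sd_done() so the medium-access timeout counter is reset on every completion, not only on those carrying sense data:

    /* Ordering fix in sd_done(), condensed: clear the counter before
     * the sense-validity bailout. */
    sdkp->medium_access_timed_out = 0;

    if (driver_byte(result) != DRIVER_SENSE &&
        (!sense_valid || sense_deferred))
            goto out;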
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e1..53268aaba559 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1653,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
if (!rq)
return -ENOMEM;
+ blk_rq_set_block_pc(rq);
memcpy(rq->cmd, cmd, hp->cmd_len);
-
rq->cmd_len = hp->cmd_len;
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
srp->rq = rq;
rq->end_io_data = srp;
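
sg, st, scsi_error and scsi_lib all switch from poking req->cmd_type = REQ_TYPE_BLOCK_PC by hand to the block layer's new blk_rq_set_block_pc(), which initializes a request for a BLOCK_PC payload in one place. A recollection of its body from the companion block-layer change (verify field names against the tree):

    void blk_rq_set_block_pc(struct request *rq)
    {
            rq->cmd_type = REQ_TYPE_BLOCK_PC;
            rq->__data_len = 0;
            rq->__sector = (sector_t)-1;
            rq->bio = rq->biotail = NULL;
            memset(rq->__cmd, 0, sizeof(rq->__cmd));
    }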
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 40d85929aefe..93cbd36c990b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
+static int sr_init_command(struct scsi_cmnd *SCpnt);
static int sr_done(struct scsi_cmnd *);
static int sr_runtime_suspend(struct device *dev);
@@ -94,6 +95,7 @@ static struct scsi_driver sr_template = {
.remove = sr_remove,
.pm = &sr_pm_ops,
},
+ .init_command = sr_init_command,
.done = sr_done,
};
@@ -378,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)
return good_bytes;
}
-static int sr_prep_fn(struct request_queue *q, struct request *rq)
+static int sr_init_command(struct scsi_cmnd *SCpnt)
{
int block = 0, this_count, s_size;
struct scsi_cd *cd;
- struct scsi_cmnd *SCpnt;
- struct scsi_device *sdp = q->queuedata;
+ struct request *rq = SCpnt->request;
+ struct scsi_device *sdp = SCpnt->device;
int ret;
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- ret = scsi_setup_blk_pc_cmnd(sdp, rq);
- goto out;
- } else if (rq->cmd_type != REQ_TYPE_FS) {
- ret = BLKPREP_KILL;
- goto out;
- }
ret = scsi_setup_fs_cmnd(sdp, rq);
if (ret != BLKPREP_OK)
goto out;
@@ -517,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
*/
ret = BLKPREP_OK;
out:
- return scsi_prep_return(q, rq, ret);
+ return ret;
}
static int sr_block_open(struct block_device *bdev, fmode_t mode)
@@ -718,7 +713,6 @@ static int sr_probe(struct device *dev)
/* FIXME: need to handle a get_capabilities failure properly ?? */
get_capabilities(cd);
- blk_queue_prep_rq(sdev->request_queue, sr_prep_fn);
sr_vendor_init(cd);
disk->driverfs_dev = &sdev->sdev_gendev;
@@ -993,7 +987,6 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
- blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
del_gendisk(cd->disk);
mutex_lock(&sr_ref_mutex);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index afc834e172c6..14eb4b256a03 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
if (!req)
return DRIVER_ERROR << 24;
- req->cmd_type = REQ_TYPE_BLOCK_PC;
+ blk_rq_set_block_pc(req);
req->cmd_flags |= REQ_QUIET;
mdata->null_mapped = 1;
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 636bbe0ea84c..88220794cc98 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
return( 0 );
if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
- TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n",
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
H_NO(cmd), cmd->device->id, cmd->device->lun );
return( 1 );
}
@@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
cmd->tag = TAG_NONE;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
- TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
"command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
}
else {
@@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
set_bit( cmd->tag, &ta->allocated );
ta->nr_allocated++;
- TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d "
+ dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
"(now %d tags in use)\n",
H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
ta->nr_allocated );
@@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
if (cmd->tag == TAG_NONE) {
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
H_NO(cmd), cmd->device->id, cmd->device->lun );
}
else if (cmd->tag >= MAX_TAGS) {
@@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
clear_bit( cmd->tag, &ta->allocated );
ta->nr_allocated--;
- TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
+ dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
}
}
@@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
#include <linux/delay.h>
-#if 1
+#if NDEBUG
static struct {
unsigned char mask;
const char * name;}
@@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
}
-#else /* !NDEBUG */
-
-/* dummies... */
-__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
-__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
-
#endif
/*
@@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void)
{
static int done = 0;
if (!done) {
- INI_PRINTK("scsi : NCR5380_all_init()\n");
+ dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
done = 1;
}
}
@@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
Scsi_Cmnd *ptr;
unsigned long flags;
- NCR_PRINT(NDEBUG_ANY);
- NCR_PRINT_PHASE(NDEBUG_ANY);
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
hostdata = (struct NCR5380_hostdata *)instance->hostdata;
@@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
local_irq_restore(flags);
- QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
+ dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
(cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
/* If queue_command() is called from an interrupt (real one or bottom
@@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl)
done = 1;
if (!hostdata->connected) {
- MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
+ dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO );
/*
* Search through the issue_queue for a command destined
* for a target that's not busy.
@@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl)
for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {
-#if (NDEBUG & NDEBUG_LISTS)
if (prev != tmp)
- printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
- tmp, tmp->target, hostdata->busy[tmp->target],
- tmp->lun);
-#endif
+ dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
/* When we find one, remove it from the issue queue. */
/* ++guenther: possible race with Falcon locking */
if (
@@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl)
* On failure, we must add the command back to the
* issue queue so we can keep trying.
*/
- MAIN_PRINTK("scsi%d: main(): command for target %d "
+ dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
"lun %d removed from issue_queue\n",
- HOSTNO, tmp->target, tmp->lun);
+ HOSTNO, tmp->device->id, tmp->device->lun);
/*
* REQUEST SENSE commands are issued without tagged
* queueing, even on SCSI-II devices because the
@@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl)
cmd_free_tag( tmp );
#endif
local_irq_restore(flags);
- MAIN_PRINTK("scsi%d: main(): select() failed, "
+ dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
"returned to issue_queue\n", HOSTNO);
if (hostdata->connected)
break;
@@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl)
#endif
) {
local_irq_restore(flags);
- MAIN_PRINTK("scsi%d: main: performing information transfer\n",
+ dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
HOSTNO);
NCR5380_information_transfer(instance);
- MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
+ dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
done = 0;
}
} while (!done);
@@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance )
return;
}
- DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
+ dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
NCR5380_read(STATUS_REG));
@@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
int done = 1, handled = 0;
unsigned char basr;
- INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
/* Look for pending interrupts */
basr = NCR5380_read(BUS_AND_STATUS_REG);
- INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
+ dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
/* dispatch to appropriate routine if found and done=0 */
if (basr & BASR_IRQ) {
- NCR_PRINT(NDEBUG_INTR);
+ NCR5380_dprint(NDEBUG_INTR, instance);
if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
done = 0;
// ENABLE_IRQ();
- INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
NCR5380_reselect(instance);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
}
else if (basr & BASR_PARITY_ERROR) {
- INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
}
else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
- INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
}
else {
@@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
((basr & BASR_END_DMA_TRANSFER) ||
!(basr & BASR_PHASE_MATCH))) {
- INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
NCR5380_dma_complete( instance );
done = 0;
// ENABLE_IRQ();
@@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
{
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
if (basr & BASR_PHASE_MATCH)
- INT_PRINTK("scsi%d: unknown interrupt, "
+ dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
"BASR 0x%x, MR 0x%x, SR 0x%x\n",
HOSTNO, basr, NCR5380_read(MODE_REG),
NCR5380_read(STATUS_REG));
@@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
}
if (!done) {
- INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
+ dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
/* Put a call to NCR5380_main() on the queue... */
queue_main();
}
@@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
unsigned long flags;
hostdata->restart_select = 0;
- NCR_PRINT(NDEBUG_ARBITRATION);
- ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
+ NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
instance->this_id);
/*
@@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
&& !hostdata->connected);
#endif
- ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
if (hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
@@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
- ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
HOSTNO);
return -1;
}
@@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
HOSTNO);
return -1;
}
@@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
return -1;
}
- ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
/*
* Now that we have won arbitration, start Selection process, asserting
@@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
udelay(1);
- SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
+ dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
/*
* The SCSI specification calls for a 250 ms timeout for the actual
@@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
if (hostdata->restart_select)
printk(KERN_NOTICE "\trestart select\n");
- NCR_PRINT(NDEBUG_ANY);
+ NCR5380_dprint(NDEBUG_ANY, instance);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return -1;
}
@@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
#endif
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
+ dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return 0;
}
@@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
/* Wait for start of REQ/ACK handshake */
while (!(NCR5380_read(STATUS_REG) & SR_REQ));
- SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
+ dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
HOSTNO, cmd->device->id);
tmp[0] = IDENTIFY(1, cmd->device->lun);
@@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
+ dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
/* XXX need to handle errors here */
hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
@@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
*/
while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
- HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
/* Check for phase mismatch */
if ((tmp & PHASE_MASK) != p) {
- PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
- NCR_PRINT_PHASE(NDEBUG_PIO);
+ dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
+ NCR5380_dprint_phase(NDEBUG_PIO, instance);
break;
}
@@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
if (!((p & SR_MSG) && c > 1)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA);
- NCR_PRINT(NDEBUG_PIO);
+ NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ACK);
} else {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ATN);
- NCR_PRINT(NDEBUG_PIO);
+ NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
}
} else {
- NCR_PRINT(NDEBUG_PIO);
+ NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
while (NCR5380_read(STATUS_REG) & SR_REQ);
- HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
/*
* We have several special cases to consider during REQ/ACK handshaking :
@@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
}
} while (--c);
- PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
+ dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
*count = c;
*data = d;
@@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance,
}
hostdata->dma_len = c;
- DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+ dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
HOSTNO, (p & SR_IO) ? "reading" : "writing",
c, (p & SR_IO) ? "to" : "from", *data);
@@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
phase = (tmp & PHASE_MASK);
if (phase != old_phase) {
old_phase = phase;
- NCR_PRINT_PHASE(NDEBUG_INFORMATION);
+ NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
if(phase == PHASE_CMDOUT) {
@@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
- INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
+ dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
HOSTNO, cmd->SCp.this_residual,
cmd->SCp.buffers_residual);
}
@@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- LNK_PRINTK("scsi%d: target %d lun %d linked command "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
/* Enable reselect interrupts */
@@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
* and don't free it! */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- LNK_PRINTK("scsi%d: target %d lun %d linked request "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
"done, calling scsi_done().\n",
HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
@@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = NULL;
- QU_PRINTK("scsi%d: command for target %d, lun %d "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef SUPPORT_TAGS
cmd_free_tag( cmd );
@@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* ++Andreas: the mid level code knows about
QUEUE_FULL now. */
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
- TAG_PRINTK("scsi%d: target %d lun %d returned "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
"QUEUE_FULL after %d commands\n",
HOSTNO, cmd->device->id, cmd->device->lun,
ta->nr_allocated);
@@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
if ((cmd->cmnd[0] != REQUEST_SENSE) &&
(status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
- ASEN_PRINTK("scsi%d: performing request sense\n",
+ dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",
HOSTNO);
/* this is initialized from initialize_SCp
cmd->SCp.buffer = NULL;
@@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
SET_NEXT(cmd, hostdata->issue_queue);
hostdata->issue_queue = (struct scsi_cmnd *) cmd;
local_irq_restore(flags);
- QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
+ dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
"issue queue\n", H_NO(cmd));
} else
#endif /* def AUTOSENSE */
@@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
cmd->device->tagged_supported = 0;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
cmd->tag = TAG_NONE;
- TAG_PRINTK("scsi%d: target %d lun %d rejected "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
"QUEUE_TAG message; tagged queuing "
"disabled\n",
HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
local_irq_restore(flags);
- QU_PRINTK("scsi%d: command for target %d lun %d was "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
"moved from connected to the "
"disconnected_queue\n", HOSTNO,
cmd->device->id, cmd->device->lun);
@@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept first byte by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
+ dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+ dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
(int)extended_msg[1], (int)extended_msg[2]);
if (!len && extended_msg[1] <=
@@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
- EXT_PRINTK("scsi%d: message received, residual %d\n",
+ dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
HOSTNO, len);
switch (extended_msg[2]) {
@@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
break;
default:
printk("scsi%d: unknown phase\n", HOSTNO);
- NCR_PRINT(NDEBUG_ANY);
+ NCR5380_dprint(NDEBUG_ANY, instance);
} /* switch(phase) */
} /* if (tmp * SR_REQ) */
} /* while (1) */
@@ -2458,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
- RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
+ dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
msg[1] == SIMPLE_QUEUE_TAG)
tag = msg[2];
- TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
+ dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
"reselection\n", HOSTNO, target_mask, lun, tag);
}
#endif
hostdata->connected = tmp;
- RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
- HOSTNO, tmp->target, tmp->lun, tmp->tag);
+ dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
}
@@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
local_irq_save(flags);
- ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+ dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
NCR5380_read(BUS_AND_STATUS_REG),
NCR5380_read(STATUS_REG));
@@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (hostdata->connected == cmd) {
- ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
+ dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
/*
* We should perform BSY checking, and make sure we haven't slipped
* into BUS FREE.
@@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
#endif
local_irq_restore(flags);
cmd->scsi_done(cmd);
- return SCSI_ABORT_SUCCESS;
+ return SUCCESS;
} else {
/* local_irq_restore(flags); */
printk("scsi%d: abort of connected command failed!\n", HOSTNO);
- return SCSI_ABORT_ERROR;
+ return FAILED;
}
}
#endif
@@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
SET_NEXT(tmp, NULL);
tmp->result = DID_ABORT << 16;
local_irq_restore(flags);
- ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
+ dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
HOSTNO);
/* Tagged queuing note: no tag to free here, hasn't been assigned
* yet... */
tmp->scsi_done(tmp);
- return SCSI_ABORT_SUCCESS;
+ return SUCCESS;
}
/*
@@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (hostdata->connected) {
local_irq_restore(flags);
- ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
- return SCSI_ABORT_SNOOZE;
+ dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+ return FAILED;
}
/*
@@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
tmp = NEXT(tmp))
if (cmd == tmp) {
local_irq_restore(flags);
- ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
+ dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
if (NCR5380_select (instance, cmd, (int) cmd->tag))
- return SCSI_ABORT_BUSY;
+ return FAILED;
- ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
+ dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
do_abort (instance);
@@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
#endif
local_irq_restore(flags);
tmp->scsi_done(tmp);
- return SCSI_ABORT_SUCCESS;
+ return SUCCESS;
}
}
@@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
local_irq_restore(flags);
printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
- return SCSI_ABORT_NOT_RUNNING;
+ return FAILED;
}
@@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
*
* Purpose : reset the SCSI bus.
*
- * Returns : SCSI_RESET_WAKEUP
+ * Returns : SUCCESS or FAILED
*
*/
@@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
SETUP_HOSTDATA(cmd->device->host);
int i;
unsigned long flags;
-#if 1
+#if defined(RESET_RUN_DONE)
struct scsi_cmnd *connected, *disconnected_queue;
#endif
@@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* through anymore ... */
(void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );
-#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
- /* XXX see below XXX */
+ /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
+ * should go.
+ * Catch-22: if we don't clear all queues, the SCSI driver lock will
+ * not be released by atari_scsi_reset()!
+ */
+
+#if defined(RESET_RUN_DONE)
+ /* XXX Should now be done by midlevel code, but it's broken XXX */
+ /* XXX see below XXX */
/* MSch: old-style reset: actually abort all command processing here */
@@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
*/
if ((cmd = connected)) {
- ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
cmd->scsi_done( cmd );
}
@@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
cmd->scsi_done( cmd );
}
if (i > 0)
- ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
+ dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
/* since all commands have been explicitly terminated, we need to tell
* the midlevel code that the reset was SUCCESSFUL, and there is no
* need to 'wake up' the commands by a request_sense
*/
- return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+ return SUCCESS;
#else /* 1 */
/* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
*/
if (hostdata->issue_queue)
- ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
if (hostdata->connected)
- ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
if (hostdata->disconnected_queue)
- ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
local_irq_save(flags);
hostdata->issue_queue = NULL;
@@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
local_irq_restore(flags);
/* we did no complete reset of all commands, so a wakeup is required */
- return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
+ return SUCCESS;
#endif /* 1 */
}
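The old "#if 1" blocks in the bus-reset path above are now keyed to
RESET_RUN_DONE, so each board file opts in to the old-style "abort
everything" reset explicitly; sun3_scsi.c below does exactly this.
A minimal sketch of the opt-in (leaving the symbol undefined selects
the new-style path that only clears the queues and lets the midlevel
rerun the commands):

        /* in a board file, before the shared 5380 core is built in */
        #define RESET_RUN_DONE  /* old-style reset: complete all commands here */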
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index e2c009b033ce..9707b7494a89 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -3,6 +3,10 @@
*
* Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
*
+ * VME support added by Sam Creasey
+ *
+ * TODO: modify this driver to support multiple Sun3 SCSI VME boards
+ *
* Adapted from mac_scsinew.c:
*/
/*
@@ -45,10 +49,6 @@
* USLEEP - enable support for devices that don't disconnect. Untested.
*/
-/*
- * $Log: sun3_NCR5380.c,v $
- */
-
#define AUTOSENSE
#include <linux/types.h>
@@ -69,23 +69,15 @@
#include <asm/idprom.h>
#include <asm/machines.h>
-#define NDEBUG 0
-
-#define NDEBUG_ABORT 0x00100000
-#define NDEBUG_TAGS 0x00200000
-#define NDEBUG_MERGING 0x00400000
-
/* dma on! */
#define REAL_DMA
#include "scsi.h"
-#include "initio.h"
#include <scsi/scsi_host.h>
#include "sun3_scsi.h"
+#include "NCR5380.h"
-static void NCR5380_print(struct Scsi_Host *instance);
-
-/* #define OLDDMA */
+extern int sun3_map_test(unsigned long, char *);
#define USE_WRAPPER
/*#define RESET_BOOT */
@@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance);
/* #define SUPPORT_TAGS */
+#ifdef SUN3_SCSI_VME
+#define ENABLE_IRQ()
+#else
#define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI );
+#endif
static irqreturn_t scsi_sun3_intr(int irq, void *dummy);
@@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0);
static struct scsi_cmnd *sun3_dma_setup_done = NULL;
+#define RESET_RUN_DONE
+
#define AFTER_RESET_DELAY (HZ/2)
/* ms to wait after hitting dma regs */
@@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL;
static volatile unsigned char *sun3_scsi_regp;
static volatile struct sun3_dma_regs *dregs;
-#ifdef OLDDMA
-static unsigned char *dmabuf = NULL; /* dma memory buffer */
-#endif
+#ifndef SUN3_SCSI_VME
static struct sun3_udc_regs *udc_regs = NULL;
+#endif
static unsigned char *sun3_dma_orig_addr = NULL;
static unsigned long sun3_dma_orig_count = 0;
static int sun3_dma_active = 0;
@@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value)
sun3_scsi_regp[reg] = value;
}
+#ifndef SUN3_SCSI_VME
/* dma controller register access functions */
static inline unsigned short sun3_udc_read(unsigned char reg)
@@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg)
dregs->udc_data = val;
udelay(SUN3_DMA_DELAY);
}
+#endif
/*
* XXX: status debug
@@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance;
*
*/
-int __init sun3scsi_detect(struct scsi_host_template * tpnt)
+static int __init sun3scsi_detect(struct scsi_host_template *tpnt)
{
- unsigned long ioaddr;
+ unsigned long ioaddr, irq;
static int called = 0;
struct Scsi_Host *instance;
+#ifdef SUN3_SCSI_VME
+ int i;
+ unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
+ IOBASE_SUN3_VMESCSI + 0x4000,
+ 0 };
+ unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
+ SUN3_VEC_VMESCSI1,
+ 0 };
+#endif
/* check that this machine has an onboard 5380 */
switch(idprom->id_machtype) {
+#ifdef SUN3_SCSI_VME
+ case SM_SUN3|SM_3_160:
+ case SM_SUN3|SM_3_260:
+ break;
+#else
case SM_SUN3|SM_3_50:
case SM_SUN3|SM_3_60:
break;
+#endif
default:
return 0;
@@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
if(called)
return 0;
+#ifdef SUN3_SCSI_VME
+ tpnt->proc_name = "Sun3 5380 VME SCSI";
+#else
tpnt->proc_name = "Sun3 5380 SCSI";
+#endif
/* setup variables */
tpnt->can_queue =
@@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
tpnt->this_id = 7;
}
+#ifdef SUN3_SCSI_VME
+ ioaddr = 0;
+ for (i = 0; addrs[i] != 0; i++) {
+ unsigned char x;
+
+ ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
+ SUN3_PAGE_TYPE_VME16);
+ irq = vecs[i];
+ sun3_scsi_regp = (unsigned char *)ioaddr;
+
+ dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
+
+ if (sun3_map_test((unsigned long)dregs, &x)) {
+ unsigned short oldcsr;
+
+ oldcsr = dregs->csr;
+ dregs->csr = 0;
+ udelay(SUN3_DMA_DELAY);
+ if (dregs->csr == 0x1400)
+ break;
+
+ dregs->csr = oldcsr;
+ }
+
+ iounmap((void *)ioaddr);
+ ioaddr = 0;
+ }
+
+ if (!ioaddr)
+ return 0;
+#else
+ irq = IRQ_SUN3_SCSI;
ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE);
sun3_scsi_regp = (unsigned char *)ioaddr;
@@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
return 0;
}
-#ifdef OLDDMA
- if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) {
- printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
- return 0;
- }
#endif
#ifdef SUPPORT_TAGS
if (setup_use_tagged_queuing < 0)
@@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
default_instance = instance;
instance->io_port = (unsigned long) ioaddr;
- instance->irq = IRQ_SUN3_SCSI;
+ instance->irq = irq;
NCR5380_init(instance, 0);
@@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
#endif
}
- printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port);
+ pr_info("scsi%d: %s at port %lX irq", instance->host_no,
+ tpnt->proc_name, instance->io_port);
if (instance->irq == SCSI_IRQ_NONE)
printk ("s disabled");
else
@@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
udelay(SUN3_DMA_DELAY);
dregs->fifo_count = 0;
+#ifdef SUN3_SCSI_VME
+ dregs->fifo_count_hi = 0;
+ dregs->dma_addr_hi = 0;
+ dregs->dma_addr_lo = 0;
+ dregs->dma_count_hi = 0;
+ dregs->dma_count_lo = 0;
+
+ dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
+#endif
called = 1;
@@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
}
#endif
-const char * sun3scsi_info (struct Scsi_Host *spnt) {
+static const char *sun3scsi_info(struct Scsi_Host *spnt)
+{
return "";
}
@@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
unsigned short csr = dregs->csr;
int handled = 0;
+#ifdef SUN3_SCSI_VME
+ dregs->csr &= ~CSR_DMA_ENABLE;
+#endif
+
if(csr & ~CSR_GOOD) {
if(csr & CSR_DMA_BUSERR) {
printk("scsi%d: bus error in dma\n", default_instance->host_no);
@@ -422,31 +482,28 @@ void sun3_sun3_debug (void)
/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
{
-#ifdef OLDDMA
- if(write_flag)
- memcpy(dmabuf, data, count);
- else {
- sun3_dma_orig_addr = data;
- sun3_dma_orig_count = count;
- }
-#else
void *addr;
if(sun3_dma_orig_addr != NULL)
dvma_unmap(sun3_dma_orig_addr);
-// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf);
+#ifdef SUN3_SCSI_VME
+ addr = (void *)dvma_map_vme((unsigned long) data, count);
+#else
addr = (void *)dvma_map((unsigned long) data, count);
+#endif
sun3_dma_orig_addr = addr;
sun3_dma_orig_count = count;
-#endif
+
+#ifndef SUN3_SCSI_VME
dregs->fifo_count = 0;
sun3_udc_write(UDC_RESET, UDC_CSR);
/* reset fifo */
dregs->csr &= ~CSR_FIFO;
dregs->csr |= CSR_FIFO;
+#endif
/* set direction */
if(write_flag)
@@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
else
dregs->csr &= ~CSR_SEND;
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_PACK_ENABLE;
+
+ dregs->dma_addr_hi = ((unsigned long)addr >> 16);
+ dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
+
+ dregs->dma_count_hi = 0;
+ dregs->dma_count_lo = 0;
+ dregs->fifo_count_hi = 0;
+ dregs->fifo_count = 0;
+#else
/* byte count for fifo */
dregs->fifo_count = count;
@@ -467,17 +535,12 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
printk("scsi%d: fifo_mismatch %04x not %04x\n",
default_instance->host_no, dregs->fifo_count,
(unsigned int) count);
- NCR5380_print(default_instance);
+ NCR5380_dprint(NDEBUG_DMA, default_instance);
}
/* setup udc */
-#ifdef OLDDMA
- udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8);
- udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff);
-#else
udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8);
udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff);
-#endif
udc_regs->count = count/2; /* count in words */
udc_regs->mode_hi = UDC_MODE_HIWORD;
if(write_flag) {
@@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
/* interrupt enable */
sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
+#endif
return count;
}
+#ifndef SUN3_SCSI_VME
static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
{
unsigned short resid;
@@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
return (unsigned long) resid;
}
+#endif
static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
{
@@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
{
+#ifdef SUN3_SCSI_VME
+ unsigned short csr;
+
+ csr = dregs->csr;
+ dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
+ dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
+
+ dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
+ dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
+
+/* if(!(csr & CSR_DMA_ENABLE))
+ * dregs->csr |= CSR_DMA_ENABLE;
+ */
+#else
sun3_udc_write(UDC_CHN_START, UDC_CSR);
+#endif
return 0;
}
@@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
/* clean up after our dma is done */
static int sun3scsi_dma_finish(int write_flag)
{
- unsigned short count;
+ unsigned short __maybe_unused count;
unsigned short fifo;
int ret = 0;
sun3_dma_active = 0;
-#if 1
+
+#ifdef SUN3_SCSI_VME
+ dregs->csr &= ~CSR_DMA_ENABLE;
+
+ fifo = dregs->fifo_count;
+ if (write_flag) {
+ if ((fifo > 0) && (fifo < sun3_dma_orig_count))
+ fifo++;
+ }
+
+ last_residual = fifo;
+ /* empty bytes from the fifo which didn't make it */
+ if ((!write_flag) && (dregs->csr & CSR_LEFT)) {
+ unsigned char *vaddr;
+
+ vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
+
+ vaddr += (sun3_dma_orig_count - fifo);
+ vaddr--;
+
+ switch (dregs->csr & CSR_LEFT) {
+ case CSR_LEFT_3:
+ *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
+ vaddr--;
+
+ case CSR_LEFT_2:
+ *vaddr = (dregs->bpack_hi & 0x00ff);
+ vaddr--;
+
+ case CSR_LEFT_1:
+ *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
+ break;
+ }
+ }
+#else
// check to empty the fifo on a read
if(!write_flag) {
int tmo = 20000; /* .2 sec */
@@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag)
udelay(10);
}
}
-
-#endif
count = sun3scsi_dma_count(default_instance);
-#ifdef OLDDMA
-
- /* if we've finished a read, copy out the data we read */
- if(sun3_dma_orig_addr) {
- /* check for residual bytes after dma end */
- if(count && (NCR5380_read(BUS_AND_STATUS_REG) &
- (BASR_PHASE_MATCH | BASR_ACK))) {
- printk("scsi%d: sun3_scsi_finish: read overrun baby... ", default_instance->host_no);
- printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG));
- ret = count;
- }
-
- /* copy in what we dma'd no matter what */
- memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count);
- sun3_dma_orig_addr = NULL;
-
- }
-#else
fifo = dregs->fifo_count;
last_residual = fifo;
@@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag)
vaddr[-2] = (data & 0xff00) >> 8;
vaddr[-1] = (data & 0xff);
}
+#endif
dvma_unmap(sun3_dma_orig_addr);
sun3_dma_orig_addr = NULL;
-#endif
+
+#ifdef SUN3_SCSI_VME
+ dregs->dma_addr_hi = 0;
+ dregs->dma_addr_lo = 0;
+ dregs->dma_count_hi = 0;
+ dregs->dma_count_lo = 0;
+
+ dregs->fifo_count = 0;
+ dregs->fifo_count_hi = 0;
+
+ dregs->csr &= ~CSR_SEND;
+/* dregs->csr |= CSR_DMA_ENABLE; */
+#else
sun3_udc_write(UDC_RESET, UDC_CSR);
dregs->fifo_count = 0;
dregs->csr &= ~CSR_SEND;
@@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag)
/* reset fifo */
dregs->csr &= ~CSR_FIFO;
dregs->csr |= CSR_FIFO;
+#endif
sun3_dma_setup_done = NULL;
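The VME DMA engine exposes only 16-bit halves for its address and count
registers, hence the _hi/_lo pairs programmed throughout the
SUN3_SCSI_VME branches above. A hypothetical helper, not part of this
patch, showing the split in one place:

        /* hypothetical: load a byte count into the split VME registers */
        static inline void sun3_vme_load_count(volatile struct sun3_dma_regs *r,
                                               unsigned long count)
        {
                r->dma_count_hi  = count >> 16;
                r->dma_count_lo  = count & 0xffff;
                r->fifo_count_hi = count >> 16;
                r->fifo_count    = count & 0xffff;
        }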
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index a8da9c710fea..e96a37cf06ac 100644
--- a/drivers/scsi/sun3_scsi.h
+++ b/drivers/scsi/sun3_scsi.h
@@ -29,12 +29,8 @@
* 1+ (800) 334-5454
*/
-/*
- * $Log: cumana_NCR5380.h,v $
- */
-
-#ifndef SUN3_NCR5380_H
-#define SUN3_NCR5380_H
+#ifndef SUN3_SCSI_H
+#define SUN3_SCSI_H
#define SUN3SCSI_PUBLIC_RELEASE 1
@@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *);
#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
#endif
-#ifndef HOSTS_C
-
#define NCR5380_implementation_fields \
int port, ctrl
@@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *);
#define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0)
#define NCR5380_dma_residual sun3scsi_dma_residual
-#define BOARD_NORMAL 0
-#define BOARD_NCR53C400 1
-
/* additional registers - mainly DMA control regs */
/* these start at regbase + 8 -- directly after the NCR regs */
struct sun3_dma_regs {
@@ -191,189 +182,5 @@ struct sun3_udc_regs {
#define VME_DATA24 0x3d00
-// debugging printk's, taken from atari_scsi.h
-/* Debugging printk definitions:
- *
- * ARB -> arbitration
- * ASEN -> auto-sense
- * DMA -> DMA
- * HSH -> PIO handshake
- * INF -> information transfer
- * INI -> initialization
- * INT -> interrupt
- * LNK -> linked commands
- * MAIN -> NCR5380_main() control flow
- * NDAT -> no data-out phase
- * NWR -> no write commands
- * PIO -> PIO transfers
- * PDMA -> pseudo DMA (unused on Atari)
- * QU -> queues
- * RSL -> reselections
- * SEL -> selections
- * USL -> usleep cpde (unused on Atari)
- * LBS -> last byte sent (unused on Atari)
- * RSS -> restarting of selections
- * EXT -> extended messages
- * ABRT -> aborting and resetting
- * TAG -> queue tag handling
- * MER -> merging of consec. buffers
- *
- */
-
-#include "NCR5380.h"
-
-#if NDEBUG & NDEBUG_ARBITRATION
-#define ARB_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define ARB_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_AUTOSENSE
-#define ASEN_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define ASEN_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_DMA
-#define DMA_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define DMA_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_HANDSHAKE
-#define HSH_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define HSH_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_INFORMATION
-#define INF_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define INF_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_INIT
-#define INI_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define INI_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_INTR
-#define INT_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define INT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_LINKED
-#define LNK_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define LNK_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_MAIN
-#define MAIN_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define MAIN_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_NO_DATAOUT
-#define NDAT_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define NDAT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_NO_WRITE
-#define NWR_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define NWR_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_PIO
-#define PIO_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define PIO_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_PSEUDO_DMA
-#define PDMA_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define PDMA_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_QUEUES
-#define QU_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define QU_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_RESELECTION
-#define RSL_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define RSL_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_SELECTION
-#define SEL_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define SEL_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_USLEEP
-#define USL_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define USL_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_LAST_BYTE_SENT
-#define LBS_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define LBS_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_RESTART_SELECT
-#define RSS_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define RSS_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_EXTENDED
-#define EXT_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define EXT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_ABORT
-#define ABRT_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define ABRT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_TAGS
-#define TAG_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define TAG_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_MERGING
-#define MER_PRINTK(format, args...) \
- printk(KERN_DEBUG format , ## args)
-#else
-#define MER_PRINTK(format, args...)
-#endif
-
-/* conditional macros for NCR5380_print_{,phase,status} */
-
-#define NCR_PRINT(mask) \
- ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
-
-#define NCR_PRINT_PHASE(mask) \
- ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
-
-#define NCR_PRINT_STATUS(mask) \
- ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
-
-
-
-#endif /* ndef HOSTS_C */
-#endif /* SUN3_NCR5380_H */
+#endif /* SUN3_SCSI_H */
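All of the per-flag *_PRINTK macros and the NCR_PRINT* wrappers deleted
above are superseded by helpers from the shared NCR5380.h. Their likely
shape, assuming the common header's usual definitions (NCR5380.h itself
is not part of this diff):

        #define dprintk(flg, fmt, ...)                                  \
                do {                                                    \
                        if ((NDEBUG) & (flg))                           \
                                printk(KERN_DEBUG fmt, ##__VA_ARGS__);  \
                } while (0)

        #define NCR5380_dprint(flg, arg) \
                do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)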
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index a3dd55d1d2fd..1eeece6e2040 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -1,589 +1,3 @@
- /*
- * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
- *
- * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
- *
- * VME support added by Sam Creasey
- *
- * Adapted from sun3_scsi.c -- see there for other headers
- *
- * TODO: modify this driver to support multiple Sun3 SCSI VME boards
- *
- */
-
-#define AUTOSENSE
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/ctype.h>
-#include <linux/delay.h>
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-
-#include <asm/io.h>
-
-#include <asm/sun3ints.h>
-#include <asm/dvma.h>
-#include <asm/idprom.h>
-#include <asm/machines.h>
-
#define SUN3_SCSI_VME
-#undef SUN3_SCSI_DEBUG
-
-/* dma on! */
-#define REAL_DMA
-
-#define NDEBUG 0
-
-#define NDEBUG_ABORT 0x00100000
-#define NDEBUG_TAGS 0x00200000
-#define NDEBUG_MERGING 0x00400000
-
-#include "scsi.h"
-#include "initio.h"
-#include <scsi/scsi_host.h>
-#include "sun3_scsi.h"
-
-extern int sun3_map_test(unsigned long, char *);
-
-#define USE_WRAPPER
-/*#define RESET_BOOT */
-#define DRIVER_SETUP
-
-/*
- * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
- */
-#ifdef BUG
-#undef RESET_BOOT
-#undef DRIVER_SETUP
-#endif
-
-/* #define SUPPORT_TAGS */
-
-//#define ENABLE_IRQ() enable_irq( SUN3_VEC_VMESCSI0 );
-#define ENABLE_IRQ()
-
-
-static irqreturn_t scsi_sun3_intr(int irq, void *dummy);
-static inline unsigned char sun3scsi_read(int reg);
-static inline void sun3scsi_write(int reg, int value);
-
-static int setup_can_queue = -1;
-module_param(setup_can_queue, int, 0);
-static int setup_cmd_per_lun = -1;
-module_param(setup_cmd_per_lun, int, 0);
-static int setup_sg_tablesize = -1;
-module_param(setup_sg_tablesize, int, 0);
-#ifdef SUPPORT_TAGS
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
-#endif
-static int setup_hostid = -1;
-module_param(setup_hostid, int, 0);
-
-static struct scsi_cmnd *sun3_dma_setup_done = NULL;
-
-#define AFTER_RESET_DELAY (HZ/2)
-
-/* ms to wait after hitting dma regs */
-#define SUN3_DMA_DELAY 10
-
-/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */
-#define SUN3_DVMA_BUFSIZE 0xe000
-
-/* minimum number of bytes to do dma on */
-#define SUN3_DMA_MINSIZE 128
-
-static volatile unsigned char *sun3_scsi_regp;
-static volatile struct sun3_dma_regs *dregs;
-#ifdef OLDDMA
-static unsigned char *dmabuf = NULL; /* dma memory buffer */
-#endif
-static unsigned char *sun3_dma_orig_addr = NULL;
-static unsigned long sun3_dma_orig_count = 0;
-static int sun3_dma_active = 0;
-static unsigned long last_residual = 0;
-
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
- return( sun3_scsi_regp[reg] );
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
- sun3_scsi_regp[reg] = value;
-}
-
-/*
- * XXX: status debug
- */
-static struct Scsi_Host *default_instance;
-
-/*
- * Function : int sun3scsi_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : initializes mac NCR5380 driver based on the
- * command line / compile time port and irq definitions.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- *
- * Returns : 1 if a host adapter was found, 0 if not.
- *
- */
-
-static int __init sun3scsi_detect(struct scsi_host_template * tpnt)
-{
- unsigned long ioaddr, irq = 0;
- static int called = 0;
- struct Scsi_Host *instance;
- int i;
- unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
- IOBASE_SUN3_VMESCSI + 0x4000,
- 0 };
- unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
- SUN3_VEC_VMESCSI1,
- 0 };
- /* check that this machine has an onboard 5380 */
- switch(idprom->id_machtype) {
- case SM_SUN3|SM_3_160:
- case SM_SUN3|SM_3_260:
- break;
-
- default:
- return 0;
- }
-
- if(called)
- return 0;
-
- tpnt->proc_name = "Sun3 5380 VME SCSI";
-
- /* setup variables */
- tpnt->can_queue =
- (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
- tpnt->cmd_per_lun =
- (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
- tpnt->sg_tablesize =
- (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
-
- if (setup_hostid >= 0)
- tpnt->this_id = setup_hostid;
- else {
- /* use 7 as default */
- tpnt->this_id = 7;
- }
-
- ioaddr = 0;
- for(i = 0; addrs[i] != 0; i++) {
- unsigned char x;
-
- ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
- SUN3_PAGE_TYPE_VME16);
- irq = vecs[i];
- sun3_scsi_regp = (unsigned char *)ioaddr;
-
- dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
-
- if(sun3_map_test((unsigned long)dregs, &x)) {
- unsigned short oldcsr;
-
- oldcsr = dregs->csr;
- dregs->csr = 0;
- udelay(SUN3_DMA_DELAY);
- if(dregs->csr == 0x1400)
- break;
-
- dregs->csr = oldcsr;
- }
-
- iounmap((void *)ioaddr);
- ioaddr = 0;
- }
-
- if(!ioaddr)
- return 0;
-
-#ifdef SUPPORT_TAGS
- if (setup_use_tagged_queuing < 0)
- setup_use_tagged_queuing = USE_TAGGED_QUEUING;
-#endif
-
- instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
- if(instance == NULL)
- return 0;
-
- default_instance = instance;
-
- instance->io_port = (unsigned long) ioaddr;
- instance->irq = irq;
-
- NCR5380_init(instance, 0);
-
- instance->n_io_port = 32;
-
- ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
-
- if (request_irq(instance->irq, scsi_sun3_intr,
- 0, "Sun3SCSI-5380VME", instance)) {
-#ifndef REAL_DMA
- printk("scsi%d: IRQ%d not free, interrupts disabled\n",
- instance->host_no, instance->irq);
- instance->irq = SCSI_IRQ_NONE;
-#else
- printk("scsi%d: IRQ%d not free, bailing out\n",
- instance->host_no, instance->irq);
- return 0;
-#endif
- }
-
- printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port);
- if (instance->irq == SCSI_IRQ_NONE)
- printk ("s disabled");
- else
- printk (" %d", instance->irq);
- printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
- instance->can_queue, instance->cmd_per_lun,
- SUN3SCSI_PUBLIC_RELEASE);
- printk("\nscsi%d:", instance->host_no);
- NCR5380_print_options(instance);
- printk("\n");
-
- dregs->csr = 0;
- udelay(SUN3_DMA_DELAY);
- dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
- udelay(SUN3_DMA_DELAY);
- dregs->fifo_count = 0;
- dregs->fifo_count_hi = 0;
- dregs->dma_addr_hi = 0;
- dregs->dma_addr_lo = 0;
- dregs->dma_count_hi = 0;
- dregs->dma_count_lo = 0;
-
- dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
-
- called = 1;
-
-#ifdef RESET_BOOT
- sun3_scsi_reset_boot(instance);
-#endif
-
- return 1;
-}
-
-int sun3scsi_release (struct Scsi_Host *shpnt)
-{
- if (shpnt->irq != SCSI_IRQ_NONE)
- free_irq(shpnt->irq, shpnt);
-
- iounmap((void *)sun3_scsi_regp);
-
- NCR5380_exit(shpnt);
- return 0;
-}
-
-#ifdef RESET_BOOT
-/*
- * Our 'bus reset on boot' function
- */
-
-static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
-{
- unsigned long end;
-
- NCR5380_local_declare();
- NCR5380_setup(instance);
-
- /*
- * Do a SCSI reset to clean up the bus during initialization. No
- * messing with the queues, interrupts, or locks necessary here.
- */
-
- printk( "Sun3 SCSI: resetting the SCSI bus..." );
-
- /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
-// sun3_disable_irq( IRQ_SUN3_SCSI );
-
- /* get in phase */
- NCR5380_write( TARGET_COMMAND_REG,
- PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
-
- /* assert RST */
- NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
-
- /* The min. reset hold time is 25us, so 40us should be enough */
- udelay( 50 );
-
- /* reset RST and interrupt */
- NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
- NCR5380_read( RESET_PARITY_INTERRUPT_REG );
-
- for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
- barrier();
-
- /* switch on SCSI IRQ again */
-// sun3_enable_irq( IRQ_SUN3_SCSI );
-
- printk( " done\n" );
-}
-#endif
-
-static const char * sun3scsi_info (struct Scsi_Host *spnt) {
- return "";
-}
-
-// safe bits for the CSR
-#define CSR_GOOD 0x060f
-
-static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
-{
- unsigned short csr = dregs->csr;
- int handled = 0;
-
- dregs->csr &= ~CSR_DMA_ENABLE;
-
-
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi_intr csr %x\n", csr);
-#endif
-
- if(csr & ~CSR_GOOD) {
- if(csr & CSR_DMA_BUSERR) {
- printk("scsi%d: bus error in dma\n", default_instance->host_no);
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi: residual %x count %x addr %p dmaaddr %x\n",
- dregs->fifo_count,
- dregs->dma_count_lo | (dregs->dma_count_hi << 16),
- sun3_dma_orig_addr,
- dregs->dma_addr_lo | (dregs->dma_addr_hi << 16));
-#endif
- }
-
- if(csr & CSR_DMA_CONFLICT) {
- printk("scsi%d: dma conflict\n", default_instance->host_no);
- }
- handled = 1;
- }
-
- if(csr & (CSR_SDB_INT | CSR_DMA_INT)) {
- NCR5380_intr(irq, dummy);
- handled = 1;
- }
-
- return IRQ_RETVAL(handled);
-}
-
-/*
- * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk;
- * reentering NCR5380_print_status seems to have ugly side effects
- */
-
-/* this doesn't seem to get used at all -- sam */
-#if 0
-void sun3_sun3_debug (void)
-{
- unsigned long flags;
- NCR5380_local_declare();
-
- if (default_instance) {
- local_irq_save(flags);
- NCR5380_print_status(default_instance);
- local_irq_restore(flags);
- }
-}
-#endif
-
-
-/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
-{
- void *addr;
-
- if(sun3_dma_orig_addr != NULL)
- dvma_unmap(sun3_dma_orig_addr);
-
-// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf);
- addr = (void *)dvma_map_vme((unsigned long) data, count);
-
- sun3_dma_orig_addr = addr;
- sun3_dma_orig_count = count;
-
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi: dma_setup addr %p count %x\n", addr, count);
-#endif
-
-// dregs->fifo_count = 0;
-#if 0
- /* reset fifo */
- dregs->csr &= ~CSR_FIFO;
- dregs->csr |= CSR_FIFO;
-#endif
- /* set direction */
- if(write_flag)
- dregs->csr |= CSR_SEND;
- else
- dregs->csr &= ~CSR_SEND;
-
- /* reset fifo */
-// dregs->csr &= ~CSR_FIFO;
-// dregs->csr |= CSR_FIFO;
-
- dregs->csr |= CSR_PACK_ENABLE;
-
- dregs->dma_addr_hi = ((unsigned long)addr >> 16);
- dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
-
- dregs->dma_count_hi = 0;
- dregs->dma_count_lo = 0;
- dregs->fifo_count_hi = 0;
- dregs->fifo_count = 0;
-
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi: dma_setup done csr %x\n", dregs->csr);
-#endif
- return count;
-
-}
-
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
-{
- return last_residual;
-}
-
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
- struct scsi_cmnd *cmd,
- int write_flag)
-{
- if (cmd->request->cmd_type == REQ_TYPE_FS)
- return wanted;
- else
- return 0;
-}
-
-static int sun3scsi_dma_start(unsigned long count, char *data)
-{
-
- unsigned short csr;
-
- csr = dregs->csr;
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count);
-#endif
-
- dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
- dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
-
- dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
- dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
-
-// if(!(csr & CSR_DMA_ENABLE))
-// dregs->csr |= CSR_DMA_ENABLE;
-
- return 0;
-}
-
-/* clean up after our dma is done */
-static int sun3scsi_dma_finish(int write_flag)
-{
- unsigned short fifo;
- int ret = 0;
-
- sun3_dma_active = 0;
-
- dregs->csr &= ~CSR_DMA_ENABLE;
-
- fifo = dregs->fifo_count;
- if(write_flag) {
- if((fifo > 0) && (fifo < sun3_dma_orig_count))
- fifo++;
- }
-
- last_residual = fifo;
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count);
-#endif
- /* empty bytes from the fifo which didn't make it */
- if((!write_flag) && (dregs->csr & CSR_LEFT)) {
- unsigned char *vaddr;
-
-#ifdef SUN3_SCSI_DEBUG
- printk("scsi: got left over bytes\n");
-#endif
-
- vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
-
- vaddr += (sun3_dma_orig_count - fifo);
- vaddr--;
-
- switch(dregs->csr & CSR_LEFT) {
- case CSR_LEFT_3:
- *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
- vaddr--;
-
- case CSR_LEFT_2:
- *vaddr = (dregs->bpack_hi & 0x00ff);
- vaddr--;
-
- case CSR_LEFT_1:
- *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
- break;
- }
-
-
- }
-
- dvma_unmap(sun3_dma_orig_addr);
- sun3_dma_orig_addr = NULL;
-
- dregs->dma_addr_hi = 0;
- dregs->dma_addr_lo = 0;
- dregs->dma_count_hi = 0;
- dregs->dma_count_lo = 0;
-
- dregs->fifo_count = 0;
- dregs->fifo_count_hi = 0;
-
- dregs->csr &= ~CSR_SEND;
-
-// dregs->csr |= CSR_DMA_ENABLE;
-
-#if 0
- /* reset fifo */
- dregs->csr &= ~CSR_FIFO;
- dregs->csr |= CSR_FIFO;
-#endif
- sun3_dma_setup_done = NULL;
-
- return ret;
-
-}
-
-#include "sun3_NCR5380.c"
-
-static struct scsi_host_template driver_template = {
- .name = SUN3_SCSI_NAME,
- .detect = sun3scsi_detect,
- .release = sun3scsi_release,
- .info = sun3scsi_info,
- .queuecommand = sun3scsi_queue_command,
- .eh_abort_handler = sun3scsi_abort,
- .eh_bus_reset_handler = sun3scsi_bus_reset,
- .can_queue = CAN_QUEUE,
- .this_id = 7,
- .sg_tablesize = SG_TABLESIZE,
- .cmd_per_lun = CMD_PER_LUN,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
+#include "sun3_scsi.c"
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index a4abce9d526e..8cc80931df14 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -102,10 +102,6 @@
* 15 9-11
*/
-/*
- * $Log: t128.c,v $
- */
-
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 1df82c28e56d..fd68cecc62af 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -34,10 +34,6 @@
* 1+ (800) 334-5454
*/
-/*
- * $Log: t128.h,v $
- */
-
#ifndef T128_H
#define T128_H
@@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *);
#define CAN_QUEUE 32
#endif
-#ifndef HOSTS_C
-
#define NCR5380_implementation_fields \
void __iomem *base
@@ -148,6 +142,5 @@ static int t128_bus_reset(struct scsi_cmnd *);
#define T128_IRQS 0xc4a8
-#endif /* else def HOSTS_C */
#endif /* ndef ASM */
#endif /* T128_H */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 721050090520..f42d1cee652a 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -196,9 +196,9 @@ enum {
* @dword_2: UPIU header DW-2
*/
struct utp_upiu_header {
- u32 dword_0;
- u32 dword_1;
- u32 dword_2;
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
};
/**
@@ -207,7 +207,7 @@ struct utp_upiu_header {
* @cdb: Command Descriptor Block CDB DW-4 to DW-7
*/
struct utp_upiu_cmd {
- u32 exp_data_transfer_len;
+ __be32 exp_data_transfer_len;
u8 cdb[MAX_CDB_SIZE];
};
@@ -228,10 +228,10 @@ struct utp_upiu_query {
u8 idn;
u8 index;
u8 selector;
- u16 reserved_osf;
- u16 length;
- u32 value;
- u32 reserved[2];
+ __be16 reserved_osf;
+ __be16 length;
+ __be32 value;
+ __be32 reserved[2];
};
/**
@@ -256,9 +256,9 @@ struct utp_upiu_req {
* @sense_data: Sense data field DW-8 to DW-12
*/
struct utp_cmd_rsp {
- u32 residual_transfer_count;
- u32 reserved[4];
- u16 sense_data_len;
+ __be32 residual_transfer_count;
+ __be32 reserved[4];
+ __be16 sense_data_len;
u8 sense_data[18];
};
@@ -286,10 +286,10 @@ struct utp_upiu_rsp {
*/
struct utp_upiu_task_req {
struct utp_upiu_header header;
- u32 input_param1;
- u32 input_param2;
- u32 input_param3;
- u32 reserved[2];
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 reserved[2];
};
/**
@@ -301,9 +301,9 @@ struct utp_upiu_task_req {
*/
struct utp_upiu_task_rsp {
struct utp_upiu_header header;
- u32 output_param1;
- u32 output_param2;
- u32 reserved[3];
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 reserved[3];
};
/**
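Marking the UPIU fields __be32/__be16 moves the wire byte order into the
type system, so sparse (e.g. make C=2 CF="-D__CHECK_ENDIAN__") can flag
any access that skips a conversion. The ufshcd.c hunks below add those
conversions at each point of use; the pattern, roughly:

        __be32 wire = response->upiu_res.value; /* big-endian, as on the link */
        u32 host = be32_to_cpu(wire);           /* convert once, where consumed */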
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 04884d663e4e..0c2877251251 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -55,6 +55,9 @@
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT 100 /* msecs */
+
/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
@@ -71,9 +74,22 @@ enum {
/* UFSHCD states */
enum {
- UFSHCD_STATE_OPERATIONAL,
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
+ UFSHCD_STATE_OPERATIONAL,
+};
+
+/* UFSHCD error handling flags */
+enum {
+ UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+ UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};
/* Interrupt configuration options */
@@ -83,6 +99,18 @@ enum {
UFSHCD_INT_CLEAR,
};
+#define ufshcd_set_eh_in_progress(h) \
+ (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+ (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+ (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs)
*/
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
- return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+ return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
/**
@@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
- return task_req_descp->header.dword_2 & MASK_OCS;
+ return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
* ufshcd_get_tm_free_slot - get a free slot for task management request
* @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
*
- * Returns maximum number of task management request slots in case of
- * task management queue full or returns the free slot number
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if no free slot is available, else returns 1 with the tag value
+ * in @free_slot.
*/
-static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
- return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+ int tag;
+ bool ret = false;
+
+ if (!free_slot)
+ goto out;
+
+ do {
+ tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+ if (tag >= hba->nutmrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+ *free_slot = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+ clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
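
The pattern above — find a clear bit, then claim it atomically with test_and_set_bit_lock(), retrying if another CPU wins the race — is a standard lock-free tag allocator. A hedged userspace analogue using compiler atomic builtins (the helper names here are illustrative, not kernel API):

```c
/* Userspace analogue of the bitmap tag allocator (illustrative only). */
#include <stdio.h>
#include <stdbool.h>

#define NUM_SLOTS 8
static unsigned long slots_in_use; /* bit i set => slot i busy */

static bool get_free_slot(int *free_slot)
{
	for (;;) {
		unsigned long snap = __atomic_load_n(&slots_in_use,
						     __ATOMIC_ACQUIRE);
		int tag = __builtin_ffsl(~snap) - 1; /* first zero bit */

		if (tag < 0 || tag >= NUM_SLOTS)
			return false;                /* all slots busy */
		/* claim it; retry if someone else set the bit first */
		if (!(__atomic_fetch_or(&slots_in_use, 1UL << tag,
					__ATOMIC_ACQ_REL) & (1UL << tag))) {
			*free_slot = tag;
			return true;
		}
	}
}

static void put_slot(int slot)
{
	__atomic_fetch_and(&slots_in_use, ~(1UL << slot), __ATOMIC_RELEASE);
}

int main(void)
{
	int tag;

	if (get_free_slot(&tag)) {
		printf("got slot %d\n", tag);
		put_slot(tag);
	}
	return 0;
}
```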
/**
@@ -390,26 +440,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
- * @response: upiu query response to convert
- */
-static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
-{
- response->length = be16_to_cpu(response->length);
- response->value = be32_to_cpu(response->value);
-}
-
-/**
- * ufshcd_query_to_be() - formats the buffer to big endian
- * @request: upiu query request to convert
- */
-static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
-{
- request->length = cpu_to_be16(request->length);
- request->value = cpu_to_be32(request->value);
-}
-
-/**
* ufshcd_copy_query_response() - Copy the Query Response and the data
* descriptor
* @hba: per adapter instance
@@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
UPIU_RSP_CODE_OFFSET;
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
- ufshcd_query_to_cpu(&query_res->upiu_res);
/* Get the descriptor */
@@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
struct ufs_query *query = &hba->dev_cmd.query;
- u16 len = query->request.upiu_req.length;
+ u16 len = be16_to_cpu(query->request.upiu_req.length);
u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
/* Query request header */
@@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
QUERY_OSF_SIZE);
- ufshcd_query_to_be(&ucd_req_ptr->qr);
/* Copy the Descriptor */
if ((len > 0) && (query->request.upiu_req.opcode ==
@@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
tag = cmd->request->tag;
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ break;
+ case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
+ goto out_unlock;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/* acquire the tag to make sure device cmds don't use it */
if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
@@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_send_command(hba, tag);
+out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
return err;
@@ -1151,7 +1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
}
if (flag_res)
- *flag_res = (response->upiu_res.value &
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
@@ -1170,7 +1214,7 @@ out_unlock:
*
* Returns 0 for success, non-zero in case of failure
*/
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
struct ufs_query_req *request;
@@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
switch (opcode) {
case UPIU_QUERY_OPCODE_WRITE_ATTR:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- request->upiu_req.value = *attr_val;
+ request->upiu_req.value = cpu_to_be32(*attr_val);
break;
case UPIU_QUERY_OPCODE_READ_ATTR:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
@@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- *attr_val = response->upiu_res.value;
+ *attr_val = be32_to_cpu(response->upiu_res.value);
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
@@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
*
* Returns 0 on success, non-zero value on failure
*/
-int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
struct uic_command uic_cmd = {0};
struct completion pwr_done;
@@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
goto out;
}
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- scsi_unblock_requests(hba->host);
-
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
out:
return err;
}
@@ -1831,66 +1870,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
}
/**
- * ufshcd_do_reset - reset the host controller
- * @hba: per adapter instance
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_do_reset(struct ufs_hba *hba)
-{
- struct ufshcd_lrb *lrbp;
- unsigned long flags;
- int tag;
-
- /* block commands from midlayer */
- scsi_block_requests(hba->host);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
-
- /* send controller to reset state */
- ufshcd_hba_stop(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* abort outstanding commands */
- for (tag = 0; tag < hba->nutrs; tag++) {
- if (test_bit(tag, &hba->outstanding_reqs)) {
- lrbp = &hba->lrb[tag];
- if (lrbp->cmd) {
- scsi_dma_unmap(lrbp->cmd);
- lrbp->cmd->result = DID_RESET << 16;
- lrbp->cmd->scsi_done(lrbp->cmd);
- lrbp->cmd = NULL;
- clear_bit_unlock(tag, &hba->lrb_in_use);
- }
- }
- }
-
- /* complete device management command */
- if (hba->dev_cmd.complete)
- complete(hba->dev_cmd.complete);
-
- /* clear outstanding request/task bit maps */
- hba->outstanding_reqs = 0;
- hba->outstanding_tasks = 0;
-
- /* Host controller enable */
- if (ufshcd_hba_enable(hba)) {
- dev_err(hba->dev,
- "Reset: Controller initialization failed\n");
- return FAILED;
- }
-
- if (ufshcd_link_startup(hba)) {
- dev_err(hba->dev,
- "Reset: Link start-up failed\n");
- return FAILED;
- }
-
- return SUCCESS;
-}
-
-/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
@@ -1907,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
sdev->use_10_for_ms = 1;
scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+ /* allow SCSI layer to restart the device in case of errors */
+ sdev->allow_restart = 1;
+
/*
* Inform SCSI Midlayer that the LUN queue depth is same as the
* controller queue depth. If a LUN queue depth is less than the
@@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
* ufshcd_task_req_compl - handle task management request completion
* @hba: per adapter instance
* @index: index of the completed request
+ * @resp: task management service response
*
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success
*/
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
struct utp_task_req_desc *task_req_descp;
struct utp_upiu_task_rsp *task_rsp_upiup;
@@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
task_req_descp[index].task_rsp_upiu;
task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
-
- if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
- task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
- task_result = FAILED;
- else
- task_result = SUCCESS;
+ if (resp)
+ *resp = (u8)task_result;
} else {
- task_result = FAILED;
- dev_err(hba->dev,
- "trc: Invalid ocs = %x\n", ocs_value);
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- return task_result;
+
+ return ocs_value;
}
/**
@@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case OCS_ABORTED:
result |= DID_ABORT << 16;
break;
+ case OCS_INVALID_COMMAND_STATUS:
+ result |= DID_REQUEUE << 16;
+ break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
case OCS_MISMATCH_DATA_BUF_SIZE:
@@ -2422,41 +2404,145 @@ out:
}
/**
- * ufshcd_fatal_err_handler - handle fatal errors
- * @hba: per adapter instance
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
*/
-static void ufshcd_fatal_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct work_struct *work)
{
struct ufs_hba *hba;
- hba = container_of(work, struct ufs_hba, feh_workq);
+ unsigned long flags;
+ u32 err_xfer = 0;
+ u32 err_tm = 0;
+ int err = 0;
+ int tag;
+
+ hba = container_of(work, struct ufs_hba, eh_work);
pm_runtime_get_sync(hba->dev);
- /* check if reset is already in progress */
- if (hba->ufshcd_state != UFSHCD_STATE_RESET)
- ufshcd_do_reset(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto out;
+ }
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
+ if (ufshcd_clear_cmd(hba, tag))
+ err_xfer |= 1 << tag;
+
+ /* Clear pending task management requests */
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
+ if (ufshcd_clear_tm_cmd(hba, tag))
+ err_tm |= 1 << tag;
+
+ /* Complete the requests that are cleared by s/w */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Fatal errors need reset */
+ if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+ err = ufshcd_reset_and_restore(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: reset and restore failed\n",
+ __func__);
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ /*
+ * Inform the scsi mid-layer that we did a reset and allow it to
+ * handle Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+
+out:
+ scsi_unblock_requests(hba->host);
pm_runtime_put_sync(hba->dev);
}
/**
- * ufshcd_err_handler - Check for fatal errors
- * @work: pointer to a work queue structure
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
*/
-static void ufshcd_err_handler(struct ufs_hba *hba)
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ /* PA_INIT_ERROR is fatal and needs UIC reset */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+
+ /* UIC NL/TL/DME errors need software retry */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+ if (reg)
+ hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+ if (reg)
+ hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+ if (reg)
+ hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+ dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+ __func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+ bool queue_eh_work = false;
+
if (hba->errors & INT_FATAL_ERRORS)
- goto fatal_eh;
+ queue_eh_work = true;
if (hba->errors & UIC_ERROR) {
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- goto fatal_eh;
+ hba->uic_error = 0;
+ ufshcd_update_uic_error(hba);
+ if (hba->uic_error)
+ queue_eh_work = true;
}
- return;
-fatal_eh:
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- schedule_work(&hba->feh_workq);
+
+ if (queue_eh_work) {
+ /* handle fatal errors only when link is functional */
+ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+ /* block commands from scsi mid-layer */
+ scsi_block_requests(hba->host);
+
+ /* transfer error masks to sticky bits */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ schedule_work(&hba->eh_work);
+ }
+ }
+ /*
+ * if (!queue_eh_work) -
+ * Other errors are either non-fatal, where the host recovers
+ * by itself without s/w intervention, or errors that will be
+ * handled by the SCSI core layer.
+ */
}
/**
@@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+ wake_up(&hba->tm_wq);
}
/**
@@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors)
- ufshcd_err_handler(hba);
+ ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
ufshcd_uic_cmd_compl(hba, intr_status);
@@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ u32 mask = 1 << tag;
+ unsigned long flags;
+
+ if (!test_bit(tag, &hba->outstanding_tasks))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* poll for max. 1 sec for the door bell register to be cleared by h/w */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TASK_REQ_DOOR_BELL,
+ mask, 0, 1000, 1000);
+out:
+ return err;
+}
+
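
ufshcd_clear_tm_cmd() above writes the "list clear" register and then polls the doorbell until the controller drops the bit, giving up after one second. The shape of that bounded poll, sketched in userspace against a fake register (names hypothetical):

```c
/* Userspace sketch of a bounded register poll (fake register, hypothetical names). */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

static volatile uint32_t fake_doorbell;

static int wait_for_register(volatile uint32_t *reg, uint32_t mask,
			     uint32_t val, unsigned int interval_us,
			     unsigned int timeout_ms)
{
	unsigned int waited_us = 0;

	while ((*reg & mask) != (val & mask)) {
		if (waited_us >= timeout_ms * 1000)
			return -1; /* -ETIMEDOUT analogue */
		usleep(interval_us);
		waited_us += interval_us;
	}
	return 0;
}

int main(void)
{
	fake_doorbell = 1 << 3;     /* slot 3 pending */
	fake_doorbell &= ~(1 << 3); /* "hardware" clears it */
	printf("wait: %d\n",
	       wait_for_register(&fake_doorbell, 1 << 3, 0, 1000, 1000));
	return 0;
}
```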
/**
* ufshcd_issue_tm_cmd - issues task management commands to controller
* @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
*
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success.
*/
-static int
-ufshcd_issue_tm_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp,
- u8 tm_function)
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
{
struct utp_task_req_desc *task_req_descp;
struct utp_upiu_task_req *task_req_upiup;
struct Scsi_Host *host;
unsigned long flags;
- int free_slot = 0;
+ int free_slot;
int err;
+ int task_tag;
host = hba->host;
- spin_lock_irqsave(host->host_lock, flags);
-
- /* If task management queue is full */
- free_slot = ufshcd_get_tm_free_slot(hba);
- if (free_slot >= hba->nutmrs) {
- spin_unlock_irqrestore(host->host_lock, flags);
- dev_err(hba->dev, "Task management queue full\n");
- err = FAILED;
- goto out;
- }
+ /*
+ * Get a free slot; sleep if no slot is available.
+ * Even though we use wait_event(), which can sleep indefinitely,
+ * the maximum wait time is bounded by %TM_CMD_TIMEOUT because every
+ * outstanding TM command either completes or times out within it.
+ */
+ wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ spin_lock_irqsave(host->host_lock, flags);
task_req_descp = hba->utmrdl_base_addr;
task_req_descp += free_slot;
@@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
/* Configure task request UPIU */
task_req_upiup =
(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+ task_tag = hba->nutrs + free_slot;
task_req_upiup->header.dword_0 =
UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
- lrbp->lun, lrbp->task_tag);
+ lun_id, task_tag);
task_req_upiup->header.dword_1 =
UPIU_HEADER_DWORD(0, tm_function, 0, 0);
- task_req_upiup->input_param1 = lrbp->lun;
- task_req_upiup->input_param1 =
- cpu_to_be32(task_req_upiup->input_param1);
- task_req_upiup->input_param2 = lrbp->task_tag;
- task_req_upiup->input_param2 =
- cpu_to_be32(task_req_upiup->input_param2);
+ task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+ task_req_upiup->input_param2 = cpu_to_be32(task_id);
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
@@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_unlock_irqrestore(host->host_lock, flags);
/* wait until the task management command is completed */
- err =
- wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
- (test_bit(free_slot,
- &hba->tm_condition) != 0),
- 60 * HZ);
+ err = wait_event_timeout(hba->tm_wq,
+ test_bit(free_slot, &hba->tm_condition),
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
- dev_err(hba->dev,
- "Task management command timed-out\n");
- err = FAILED;
- goto out;
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed out\n",
+ __func__, tm_function);
+ if (ufshcd_clear_tm_cmd(hba, free_slot))
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+ __func__, free_slot);
+ err = -ETIMEDOUT;
+ } else {
+ err = ufshcd_task_req_compl(hba, free_slot, tm_response);
}
+
clear_bit(free_slot, &hba->tm_condition);
- err = ufshcd_task_req_compl(hba, free_slot);
-out:
+ ufshcd_put_tm_slot(hba, free_slot);
+ wake_up(&hba->tm_tag_wq);
+
return err;
}
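
Two waitqueues cooperate here: tm_tag_wq gates slot allocation while tm_wq signals completion, with tm_condition carrying the per-slot "done" bits. The issue/complete handshake boils down to the usual wait_event_timeout()/wake_up() pairing; a condensed kernel-style fragment (ring_doorbell() and handle_timeout() are hypothetical stand-ins for the surrounding code):

```c
/* Kernel-style sketch of the TM completion handshake (illustrative). */
/* issuer, process context: */
__set_bit(free_slot, &hba->outstanding_tasks);
ring_doorbell(hba, free_slot);              /* hypothetical helper */
if (!wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT)))
	handle_timeout(hba, free_slot);     /* hypothetical helper */

/* completion (IRQ) side, as in ufshcd_tmc_handler(): */
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
wake_up(&hba->tm_wq);
```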
/**
- * ufshcd_device_reset - reset device and abort all the pending commands
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
* @cmd: SCSI command pointer
*
* Returns SUCCESS/FAILED
*/
-static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
unsigned int tag;
u32 pos;
int err;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
- err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
- if (err == FAILED)
+ lrbp = &hba->lrb[tag];
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp;
goto out;
+ }
- for (pos = 0; pos < hba->nutrs; pos++) {
- if (test_bit(pos, &hba->outstanding_reqs) &&
- (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
-
- /* clear the respective UTRLCLR register bit */
- ufshcd_utrl_clear(hba, pos);
-
- clear_bit(pos, &hba->outstanding_reqs);
-
- if (hba->lrb[pos].cmd) {
- scsi_dma_unmap(hba->lrb[pos].cmd);
- hba->lrb[pos].cmd->result =
- DID_ABORT << 16;
- hba->lrb[pos].cmd->scsi_done(cmd);
- hba->lrb[pos].cmd = NULL;
- clear_bit_unlock(pos, &hba->lrb_in_use);
- wake_up(&hba->dev_cmd.tag_wq);
- }
+ /* clear the commands that were pending for corresponding LUN */
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[pos].lun == lrbp->lun) {
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
}
- } /* end of for */
+ }
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ spin_unlock_irqrestore(host->host_lock, flags);
out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
return err;
}
/**
- * ufshcd_host_reset - Main reset function registered with scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_host_reset(struct scsi_cmnd *cmd)
-{
- struct ufs_hba *hba;
-
- hba = shost_priv(cmd->device->host);
-
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- return SUCCESS;
-
- return ufshcd_do_reset(hba);
-}
-
-/**
* ufshcd_abort - abort a specific command
* @cmd: SCSI command pointer
*
+ * Abort the pending command in the device by sending the UFS_ABORT_TASK task
+ * management command, and in the host controller by clearing the door-bell
+ * register. There can be a race in which the controller is still sending the
+ * command to the device while the abort is issued. To avoid that, first issue
+ * UFS_QUERY_TASK to check if the command is really outstanding and only then
+ * try to abort it.
+ *
* Returns SUCCESS/FAILED
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
@@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
struct ufs_hba *hba;
unsigned long flags;
unsigned int tag;
- int err;
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
- spin_lock_irqsave(host->host_lock, flags);
+ /* If command is already aborted/completed, return SUCCESS */
+ if (!(test_bit(tag, &hba->outstanding_reqs)))
+ goto out;
- /* check if command is still pending */
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
- err = FAILED;
- spin_unlock_irqrestore(host->host_lock, flags);
+ lrbp = &hba->lrb[tag];
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ u32 reg;
+
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ goto out;
+ } else {
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
goto out;
}
- spin_unlock_irqrestore(host->host_lock, flags);
- err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
- if (err == FAILED)
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err)
goto out;
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
-
- /* clear the respective UTRLCLR register bit */
- ufshcd_utrl_clear(hba, tag);
-
__clear_bit(tag, &hba->outstanding_reqs);
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
@@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
clear_bit_unlock(tag, &hba->lrb_in_use);
wake_up(&hba->dev_cmd.tag_wq);
out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
+
+ return err;
+}
+
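
The resulting abort protocol is easier to see condensed. This is a simplified restatement of the loop above, with issue_tm() and doorbell_bit_set() as hypothetical stand-ins; it is not a drop-in replacement:

```c
/* Condensed view of the abort protocol (helpers hypothetical). */
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
	issue_tm(UFS_QUERY_TASK, &resp);
	if (resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
		break;                       /* device owns the cmd: abort it */
	if (resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (doorbell_bit_set(tag)) { /* cmd still in transition */
			usleep_range(100, 200);
			continue;
		}
		return SUCCESS;              /* completed already */
	}
	return FAILED;                       /* service response error */
}
issue_tm(UFS_ABORT_TASK, &resp);             /* then clear the doorbell bit */
```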
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that the host controller reset may issue DME_RESET to the
+ * local and remote (device) UniPro stacks, and the attributes are
+ * reset to their default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+ int err;
+ async_cookie_t cookie;
+ unsigned long flags;
+
+ /* Reset the host controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_hba_stop(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ goto out;
+
+ /* Establish the link again and restore the device */
+ cookie = async_schedule(ufshcd_async_scan, hba);
+ /* wait for async scan to be completed */
+ async_synchronize_cookie(++cookie);
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ err = -EIO;
+out:
+ if (err)
+ dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+ return err;
+}
+
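
The ++cookie above is deliberate: async_synchronize_cookie(c) waits for all async callbacks scheduled strictly before cookie c, so waiting for the scan just scheduled means passing its cookie plus one. A minimal kernel-style illustration:

```c
/* Kernel-style sketch of waiting on one async callback (illustrative). */
#include <linux/async.h>

static void my_async_fn(void *data, async_cookie_t cookie)
{
	/* runs in an async worker thread */
}

static void run_and_wait(void *data)
{
	async_cookie_t cookie = async_schedule(my_async_fn, data);

	/*
	 * Waits for every callback scheduled strictly before cookie + 1,
	 * i.e. including my_async_fn itself.
	 */
	async_synchronize_cookie(cookie + 1);
}
```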
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover the device and host, and re-establish the link.
+ * This helps recover communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+ int err = 0;
+ unsigned long flags;
+
+ err = ufshcd_host_reset_and_restore(hba);
+
+ /*
+ * After reset the door-bell might be cleared, complete
+ * outstanding requests in s/w here.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd - SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int err;
+ unsigned long flags;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling does reset and restore in some cases, don't assume
+ * anything about its outcome. We are just avoiding the race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!err) {
+ err = SUCCESS;
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ } else {
+ err = FAILED;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return err;
}
@@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
goto out;
ufshcd_force_reset_auto_bkops(hba);
- scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+ /* If we are in error handling context, there is no need to scan the host */
+ if (!ufshcd_eh_in_progress(hba)) {
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ }
out:
return;
}
@@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = {
.slave_alloc = ufshcd_slave_alloc,
.slave_destroy = ufshcd_slave_destroy,
.eh_abort_handler = ufshcd_abort,
- .eh_device_reset_handler = ufshcd_device_reset,
- .eh_host_reset_handler = ufshcd_host_reset,
+ .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+ .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
host->max_cmd_len = MAX_CDB_SIZE;
/* Initialize wait queue for task management */
- init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+ init_waitqueue_head(&hba->tm_wq);
+ init_waitqueue_head(&hba->tm_tag_wq);
/* Initialize work queues */
- INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
/* Initialize UIC command mutex */
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 577679a2d189..acf318e338ed 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -174,15 +174,21 @@ struct ufs_dev_cmd {
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
- * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_wq: wait queue for task management
+ * @tm_tag_wq: wait queue for free task management slots
+ * @tm_slots_in_use: bit map of task management request slots in use
* @pwr_done: completion for power mode change
* @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
+ * @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
- * @feh_workq: Work queue for fatal controller error handling
+ * @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
* @dev_cmd: ufs device management command information
* @auto_bkops_enabled: to track whether bkops is enabled in device
*/
@@ -217,21 +223,27 @@ struct ufs_hba {
struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex;
- wait_queue_head_t ufshcd_tm_wait_queue;
+ wait_queue_head_t tm_wq;
+ wait_queue_head_t tm_tag_wq;
unsigned long tm_condition;
+ unsigned long tm_slots_in_use;
struct completion *pwr_done;
u32 ufshcd_state;
+ u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
/* Work Queues */
- struct work_struct feh_workq;
+ struct work_struct eh_work;
struct work_struct eeh_work;
/* HBA Errors */
u32 errors;
+ u32 uic_error;
+ u32 saved_err;
+ u32 saved_uic_err;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -263,6 +275,8 @@ static inline void check_upiu_size(void)
GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
+extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
+extern int ufshcd_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0475c6619a68..9abc7e32b43d 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -304,10 +304,10 @@ enum {
* @size: size of physical segment DW-3
*/
struct ufshcd_sg_entry {
- u32 base_addr;
- u32 upper_addr;
- u32 reserved;
- u32 size;
+ __le32 base_addr;
+ __le32 upper_addr;
+ __le32 reserved;
+ __le32 size;
};
/**
@@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc {
* @dword3: Descriptor Header DW3
*/
struct request_desc_header {
- u32 dword_0;
- u32 dword_1;
- u32 dword_2;
- u32 dword_3;
+ __le32 dword_0;
+ __le32 dword_1;
+ __le32 dword_2;
+ __le32 dword_3;
};
/**
@@ -352,16 +352,16 @@ struct utp_transfer_req_desc {
struct request_desc_header header;
/* DW 4-5*/
- u32 command_desc_base_addr_lo;
- u32 command_desc_base_addr_hi;
+ __le32 command_desc_base_addr_lo;
+ __le32 command_desc_base_addr_hi;
/* DW 6 */
- u16 response_upiu_length;
- u16 response_upiu_offset;
+ __le16 response_upiu_length;
+ __le16 response_upiu_offset;
/* DW 7 */
- u16 prd_table_length;
- u16 prd_table_offset;
+ __le16 prd_table_length;
+ __le16 prd_table_offset;
};
/**
@@ -376,10 +376,10 @@ struct utp_task_req_desc {
struct request_desc_header header;
/* DW 4-11 */
- u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+ __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
/* DW 12-19 */
- u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+ __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
};
#endif /* End of Header */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index db3b494e5926..308256b5e4cb 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -23,6 +23,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
+#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -37,6 +38,7 @@ struct virtio_scsi_cmd {
struct completion *comp;
union {
struct virtio_scsi_cmd_req cmd;
+ struct virtio_scsi_cmd_req_pi cmd_pi;
struct virtio_scsi_ctrl_tmf_req tmf;
struct virtio_scsi_ctrl_an_req an;
} req;
@@ -73,17 +75,12 @@ struct virtio_scsi_vq {
* queue, and also lets the driver optimize the IRQ affinity for the virtqueues
* (each virtqueue's affinity is set to the CPU that "owns" the queue).
*
- * An interesting effect of this policy is that only writes to req_vq need to
- * take the tgt_lock. Read can be done outside the lock because:
+ * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
+ * could be done locklessly, but we do not do it yet.
*
- * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
- * In that case, no other CPU is reading req_vq: even if they were in
- * virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
- *
- * - reads of req_vq only occur when the target is not idle (reqs != 0).
- * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
- *
- * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Decrements of reqs are never concurrent with writes of req_vq: before the
+ * decrement reqs will be != 0; after the decrement the virtqueue completion
+ * routine will not use the req_vq so it can be changed by a new request.
* Thus they can happen outside the tgt_lock, provided of course we make reqs
* an atomic_t.
*/
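
The invariant behind this comment is that req_vq may only change on the idle-to-busy transition, i.e. when atomic_inc_return(&tgt->reqs) returns 1; any caller that observes a larger count can safely reuse the existing req_vq. A condensed sketch of the selection logic (simplified; the real code avoids the modulo):

```c
/* Condensed sketch of the per-target queue selection (simplified). */
spin_lock_irqsave(&tgt->tgt_lock, flags);
if (atomic_inc_return(&tgt->reqs) > 1) {
	/* target already busy: req_vq is stable, reuse it */
	vq = tgt->req_vq;
} else {
	/* idle -> busy transition: we own req_vq, bind it to this CPU */
	queue_num = smp_processor_id() % vscsi->num_queues;
	tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
}
spin_unlock_irqrestore(&tgt->tgt_lock, flags);
```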
@@ -204,7 +201,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_driver_byte(sc, DRIVER_SENSE);
}
- mempool_free(cmd, virtscsi_cmd_pool);
sc->scsi_done(sc);
atomic_dec(&tgt->reqs);
@@ -238,49 +234,25 @@ static void virtscsi_req_done(struct virtqueue *vq)
int index = vq->index - VIRTIO_SCSI_VQ_BASE;
struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
- /*
- * Read req_vq before decrementing the reqs field in
- * virtscsi_complete_cmd.
- *
- * With barriers:
- *
- * CPU #0 virtscsi_queuecommand_multi (CPU #1)
- * ------------------------------------------------------------
- * lock vq_lock
- * read req_vq
- * read reqs (reqs = 1)
- * write reqs (reqs = 0)
- * increment reqs (reqs = 1)
- * write req_vq
- *
- * Possible reordering without barriers:
- *
- * CPU #0 virtscsi_queuecommand_multi (CPU #1)
- * ------------------------------------------------------------
- * lock vq_lock
- * read reqs (reqs = 1)
- * write reqs (reqs = 0)
- * increment reqs (reqs = 1)
- * write req_vq
- * read (wrong) req_vq
- *
- * We do not need a full smp_rmb, because req_vq is required to get
- * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
- * in the virtqueue as the user token.
- */
- smp_read_barrier_depends();
-
virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
};
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+ int i, num_vqs;
+
+ num_vqs = vscsi->num_queues;
+ for (i = 0; i < num_vqs; i++)
+ virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+ virtscsi_complete_cmd);
+}
+
static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_cmd *cmd = buf;
if (cmd->comp)
complete_all(cmd->comp);
- else
- mempool_free(cmd, virtscsi_cmd_pool);
}
static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -291,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
};
+static void virtscsi_handle_event(struct work_struct *work);
+
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct virtio_scsi_event_node *event_node)
{
@@ -298,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct scatterlist sg;
unsigned long flags;
+ INIT_WORK(&event_node->work, virtscsi_handle_event);
sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -415,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
struct virtio_scsi_event_node *event_node = buf;
- INIT_WORK(&event_node->work, virtscsi_handle_event);
schedule_work(&event_node->work);
}
@@ -433,14 +407,13 @@ static void virtscsi_event_done(struct virtqueue *vq)
* @cmd : command structure
* @req_size : size of the request buffer
* @resp_size : size of the response buffer
- * @gfp : flags to use for memory allocations
*/
static int virtscsi_add_cmd(struct virtqueue *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size, gfp_t gfp)
+ size_t req_size, size_t resp_size)
{
struct scsi_cmnd *sc = cmd->sc;
- struct scatterlist *sgs[4], req, resp;
+ struct scatterlist *sgs[6], req, resp;
struct sg_table *out, *in;
unsigned out_num = 0, in_num = 0;
@@ -458,30 +431,38 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
sgs[out_num++] = &req;
/* Data-out buffer. */
- if (out)
+ if (out) {
+ /* Place WRITE protection SGLs before Data OUT payload */
+ if (scsi_prot_sg_count(sc))
+ sgs[out_num++] = scsi_prot_sglist(sc);
sgs[out_num++] = out->sgl;
+ }
/* Response header. */
sg_init_one(&resp, &cmd->resp, resp_size);
sgs[out_num + in_num++] = &resp;
/* Data-in buffer */
- if (in)
+ if (in) {
+ /* Place READ protection SGLs before Data IN payload */
+ if (scsi_prot_sg_count(sc))
+ sgs[out_num + in_num++] = scsi_prot_sglist(sc);
sgs[out_num + in_num++] = in->sgl;
+ }
- return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
+ return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size, gfp_t gfp)
+ size_t req_size, size_t resp_size)
{
unsigned long flags;
int err;
bool needs_kick = false;
spin_lock_irqsave(&vq->vq_lock, flags);
- err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
+ err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
if (!err)
needs_kick = virtqueue_kick_prepare(vq->vq);
@@ -492,14 +473,46 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
return err;
}
+static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+ struct scsi_cmnd *sc)
+{
+ cmd->lun[0] = 1;
+ cmd->lun[1] = sc->device->id;
+ cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+ cmd->lun[3] = sc->device->lun & 0xff;
+ cmd->tag = (unsigned long)sc;
+ cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+ cmd->prio = 0;
+ cmd->crn = 0;
+}
+
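
virtio_scsi_init_hdr() packs the target and LUN in the SAM flat-space format: byte 0 is fixed to 1, byte 1 carries the target id, and bytes 2-3 carry the LUN with 0x40 marking flat-space addressing. A quick userspace check of the encoding:

```c
/* Userspace check of the virtio-scsi LUN encoding used above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t id = 2;
	uint16_t lun = 5;
	uint8_t enc[4] = {
		1,                 /* fixed first byte */
		id,                /* target id */
		(lun >> 8) | 0x40, /* flat-space flag + LUN high bits */
		lun & 0xff,        /* LUN low byte */
	};

	printf("%02x %02x %02x %02x\n", enc[0], enc[1], enc[2], enc[3]);
	/* prints: 01 02 40 05 */
	return 0;
}
```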
+static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+ struct scsi_cmnd *sc)
+{
+ struct request *rq = sc->request;
+ struct blk_integrity *bi;
+
+ virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+
+ if (!rq || !scsi_prot_sg_count(sc))
+ return;
+
+ bi = blk_get_integrity(rq->rq_disk);
+
+ if (sc->sc_data_direction == DMA_TO_DEVICE)
+ cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+ else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+}
+
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
struct virtio_scsi_vq *req_vq,
struct scsi_cmnd *sc)
{
- struct virtio_scsi_cmd *cmd;
- int ret;
-
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ int req_size;
+
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
/* TODO: check feature bit and fail if unsupported? */
@@ -508,36 +521,24 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
dev_dbg(&sc->device->sdev_gendev,
"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
- ret = SCSI_MLQUEUE_HOST_BUSY;
- cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
- if (!cmd)
- goto out;
-
memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
- cmd->req.cmd = (struct virtio_scsi_cmd_req){
- .lun[0] = 1,
- .lun[1] = sc->device->id,
- .lun[2] = (sc->device->lun >> 8) | 0x40,
- .lun[3] = sc->device->lun & 0xff,
- .tag = (unsigned long)sc,
- .task_attr = VIRTIO_SCSI_S_SIMPLE,
- .prio = 0,
- .crn = 0,
- };
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
- memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
- if (virtscsi_kick_cmd(req_vq, cmd,
- sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
- GFP_ATOMIC) == 0)
- ret = 0;
- else
- mempool_free(cmd, virtscsi_cmd_pool);
+ if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+ virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+ memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+ req_size = sizeof(cmd->req.cmd_pi);
+ } else {
+ virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+ memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+ req_size = sizeof(cmd->req.cmd);
+ }
-out:
- return ret;
+ if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ return 0;
}
static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
@@ -560,12 +561,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
spin_lock_irqsave(&tgt->tgt_lock, flags);
- /*
- * The memory barrier after atomic_inc_return matches
- * the smp_read_barrier_depends() in virtscsi_req_done.
- */
if (atomic_inc_return(&tgt->reqs) > 1)
- vq = ACCESS_ONCE(tgt->req_vq);
+ vq = tgt->req_vq;
else {
queue_num = smp_processor_id();
while (unlikely(queue_num >= vscsi->num_queues))
@@ -596,8 +593,7 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
cmd->comp = &comp;
if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
- sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
- GFP_NOIO) < 0)
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
goto out;
wait_for_completion(&comp);
@@ -605,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
ret = SUCCESS;
+ /*
+ * The spec guarantees that all requests related to the TMF have
+ * been completed, but the callback might not have run yet if
+ * we're using independent interrupts (e.g. MSI). Poll the
+ * virtqueues once.
+ *
+ * In the abort case, sc->scsi_done will do nothing, because
+ * the block layer must have detected a timeout and as a result
+ * REQ_ATOM_COMPLETE has been set.
+ */
+ virtscsi_poll_requests(vscsi);
+
out:
mempool_free(cmd, virtscsi_cmd_pool);
return ret;
@@ -683,6 +691,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand_single,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
@@ -699,6 +708,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand_multi,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
@@ -875,7 +885,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
- int err;
+ int err, host_prot;
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
@@ -925,6 +935,16 @@ static int virtscsi_probe(struct virtio_device *vdev)
shost->max_id = num_targets;
shost->max_channel = 0;
shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(shost, host_prot);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ }
+
err = scsi_add_host(shost, &vdev->dev);
if (err)
goto scsi_add_host_failed;
@@ -994,6 +1014,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_SCSI_F_CHANGE,
+ VIRTIO_SCSI_F_T10_PI,
};
static struct virtio_driver virtio_scsi_driver = {
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index f6759dc0153b..c41ff148a2b4 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -368,7 +368,7 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
- *burst_code = chip_info ? chip_info->dma_burst_size : 16;
+ *burst_code = chip_info ? chip_info->dma_burst_size : 1;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index a98df7eeb42d..fe792106bdc5 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -118,6 +118,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
*/
orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
value = orig | SPI_CS_CONTROL_SW_MODE;
writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
@@ -126,10 +127,13 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
goto detection_done;
}
- value &= ~SPI_CS_CONTROL_SW_MODE;
+ orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+ /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
+ value = orig & ~SPI_CS_CONTROL_SW_MODE;
writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
- if (value != orig) {
+ if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
offset = 0x800;
goto detection_done;
}
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index fc1de86d3c8a..c08da380cb23 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -424,31 +424,6 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
return 0;
}
-static void spi_qup_set_cs(struct spi_device *spi, bool enable)
-{
- struct spi_qup *controller = spi_master_get_devdata(spi->master);
-
- u32 iocontol, mask;
-
- iocontol = readl_relaxed(controller->base + SPI_IO_CONTROL);
-
- /* Disable auto CS toggle and use manual */
- iocontol &= ~SPI_IO_C_MX_CS_MODE;
- iocontol |= SPI_IO_C_FORCE_CS;
-
- iocontol &= ~SPI_IO_C_CS_SELECT_MASK;
- iocontol |= SPI_IO_C_CS_SELECT(spi->chip_select);
-
- mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
-
- if (enable)
- iocontol |= mask;
- else
- iocontol &= ~mask;
-
- writel_relaxed(iocontol, controller->base + SPI_IO_CONTROL);
-}
-
static int spi_qup_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -571,12 +546,16 @@ static int spi_qup_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /* use the num-cs property unless it is absent or out of range */
+ if (of_property_read_u16(dev->of_node, "num-cs",
+ &master->num_chipselect) ||
+ (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
- master->num_chipselect = SPI_NUM_CHIPSELECTS;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->max_speed_hz = max_freq;
- master->set_cs = spi_qup_set_cs;
master->transfer_one = spi_qup_transfer_one;
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
@@ -640,16 +619,19 @@ static int spi_qup_probe(struct platform_device *pdev)
if (ret)
goto error;
- ret = devm_spi_register_master(dev, master);
- if (ret)
- goto error;
-
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto disable_pm;
+
return 0;
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
error:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
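
The reorder above matters because a registered SPI master can start accepting transfers (and triggering runtime-PM callbacks) immediately, so runtime PM must be enabled first; and since devm_* unwinding does not cover pm_runtime_enable(), a registration failure now needs the explicit disable_pm path. The resulting probe tail, condensed (the final return is implied by the surrounding code):

```c
/* Condensed view of the reordered probe() tail. */
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);

/* register last: the master may be used as soon as this returns */
ret = devm_spi_register_master(dev, master);
if (ret)
	goto disable_pm;

return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	return ret;
```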
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 1f56ef651d1a..b83dd733684c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -175,9 +175,9 @@ static int sh_sci_spi_remove(struct platform_device *dev)
{
struct sh_sci_spi *sp = platform_get_drvdata(dev);
- iounmap(sp->membase);
- setbits(sp, PIN_INIT, 0);
spi_bitbang_stop(&sp->bitbang);
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
spi_master_put(sp->bitbang.master);
return 0;
}
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
index 2c617834dc46..c341ac11c5a3 100644
--- a/drivers/staging/android/timed_output.c
+++ b/drivers/staging/android/timed_output.c
@@ -97,7 +97,6 @@ void timed_output_dev_unregister(struct timed_output_dev *tdev)
{
tdev->enable(tdev, 0);
device_destroy(timed_output_class, MKDEV(0, tdev->index));
- dev_set_drvdata(tdev->dev, NULL);
}
EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 5d56428d508a..a2f6957e7ee9 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -651,6 +651,7 @@ config COMEDI_ADDI_APCI_1516
config COMEDI_ADDI_APCI_1564
tristate "ADDI-DATA APCI_1564 support"
+ select COMEDI_ADDI_WATCHDOG
---help---
Enable support for ADDI-DATA APCI_1564 cards
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 0901ef5d6e8a..08356b6955a4 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4605,7 +4605,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netdev->netdev_ops = &et131x_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
+ netdev->ethtool_ops = &et131x_ethtool_ops;
adapter = et131x_adapter_init(netdev, pdev);
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index d6421b9b5981..a6158bef58e5 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -2249,7 +2249,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
ft1000InitProc(dev);
ft1000_card_present = 1;
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
dev->name, dev->base_addr, dev->irq, dev->dev_addr);
return dev;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index b36feb080cba..fa38be0982f9 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -36,10 +36,11 @@ config IIO_SIMPLE_DUMMY_EVENTS
Add some dummy events to the simple dummy driver.
config IIO_SIMPLE_DUMMY_BUFFER
- boolean "Buffered capture support"
- select IIO_KFIFO_BUF
- help
- Add buffered data capture to the simple dummy driver.
+ boolean "Buffered capture support"
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Add buffered data capture to the simple dummy driver.
endif # IIO_SIMPLE_DUMMY
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 357cef2a6f4c..7194bd138762 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client,
struct ad7291_platform_data *pdata = client->dev.platform_data;
struct ad7291_chip_info *chip;
struct iio_dev *indio_dev;
- int ret = 0;
+ int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
@@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client,
if (pdata && pdata->use_external_ref) {
chip->reg = devm_regulator_get(&client->dev, "vref");
if (IS_ERR(chip->reg))
- return ret;
+ return PTR_ERR(chip->reg);
ret = regulator_enable(chip->reg);
if (ret)
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index dae8d1a9038e..52d7517b342e 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -846,6 +846,14 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
LRADC_CTRL1);
mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+ /* Enable/disable the divider as required */
+ if (test_bit(chan, &lradc->is_divided))
+ mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+ LRADC_CTRL2);
+ else
+ mxs_lradc_reg_clear(lradc,
+ 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
+
/* Clean the slot's previous content, then set new one. */
mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
LRADC_CTRL4);
@@ -961,15 +969,11 @@ static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
/* divider by two disabled */
- writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
- lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
clear_bit(chan->channel, &lradc->is_divided);
ret = 0;
} else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
/* divider by two enabled */
- writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
- lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
set_bit(chan->channel, &lradc->is_divided);
ret = 0;
}
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 9e0f2a9c73ae..ab338e3ddd05 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -667,9 +667,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
chip->tsl2x7x_settings.prox_pulse_count;
chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
- chip->tsl2x7x_settings.prox_thres_low;
+ (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+ chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
+ (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
- chip->tsl2x7x_settings.prox_thres_high;
+ (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+ chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
+ (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
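
The fix writes the full 16-bit proximity thresholds by splitting each value across the LO/HI register pair; previously the high byte was silently dropped. The split itself, checked in userspace:

```c
/* Userspace check of the 16-bit -> LO/HI register split. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t thres = 0x0123;          /* example threshold */
	uint8_t lo = thres & 0xFF;        /* ..MINTHRESHLO / ..MAXTHRESHLO */
	uint8_t hi = (thres >> 8) & 0xFF; /* ..MINTHRESHHI / ..MAXTHRESHHI */

	printf("lo=0x%02x hi=0x%02x\n", lo, hi); /* lo=0x23 hi=0x01 */
	return 0;
}
```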
/* and make sure we're not already on */
if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index c6e8ba7b3e4e..82fb758a29bc 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -39,19 +39,10 @@ config DRM_IMX_LDB
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
-config DRM_IMX_IPUV3_CORE
- tristate "IPUv3 core support"
- depends on DRM_IMX
- depends on RESET_CONTROLLER
- help
- Choose this if you have a i.MX5/6 system and want
- to use the IPU. This option only enables IPU base
- support.
-
config DRM_IMX_IPUV3
tristate "DRM Support for i.MX IPUv3"
depends on DRM_IMX
- depends on DRM_IMX_IPUV3_CORE
+ depends on IMX_IPUV3_CORE
help
Choose this if you have a i.MX5 or i.MX6 processor.
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 129e3a3f59f1..582c438d8cbd 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DRM_IMX) += imxdrm.o
obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index c270c9ae6d27..def8280d7ee6 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -200,13 +200,6 @@ static const struct file_operations imx_drm_driver_fops = {
.llseek = noop_llseek,
};
-int imx_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-EXPORT_SYMBOL(imx_drm_connector_mode_valid);
-
void imx_drm_connector_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
@@ -305,7 +298,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
dev_err(drm->dev,
"[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
connector->base.id,
- drm_get_connector_name(connector), ret);
+ connector->name, ret);
goto err_unbind;
}
}
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/staging/imx-drm/imx-drm.h
index a322bac55414..7453ae00c412 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/staging/imx-drm/imx-drm.h
@@ -50,8 +50,6 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
-int imx_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
void imx_drm_connector_destroy(struct drm_connector *connector);
void imx_drm_encoder_destroy(struct drm_encoder *encoder);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index 1b440483f28f..18c9ccd460b7 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -27,8 +27,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
+#include <video/imx-ipu-v3.h>
-#include "ipu-v3/imx-ipu-v3.h"
#include "imx-hdmi.h"
#include "imx-drm.h"
@@ -1490,7 +1490,6 @@ static struct drm_connector_funcs imx_hdmi_connector_funcs = {
static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
.get_modes = imx_hdmi_connector_get_modes,
- .mode_valid = imx_drm_connector_mode_valid,
.best_encoder = imx_hdmi_connector_best_encoder,
};
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index fe4c1ef4e7a5..7e3f019d7e72 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -317,7 +317,6 @@ static struct drm_connector_funcs imx_ldb_connector_funcs = {
static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
.get_modes = imx_ldb_connector_get_modes,
.best_encoder = imx_ldb_connector_best_encoder,
- .mode_valid = imx_drm_connector_mode_valid,
};
static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index a23f4f773146..c628fcdc22ae 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -30,8 +30,8 @@
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <video/imx-ipu-v3.h>
-#include "ipu-v3/imx-ipu-v3.h"
#include "imx-drm.h"
#define TVE_COM_CONF_REG 0x00
@@ -249,11 +249,6 @@ static int imx_tve_connector_mode_valid(struct drm_connector *connector,
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
- int ret;
-
- ret = imx_drm_connector_mode_valid(connector, mode);
- if (ret != MODE_OK)
- return ret;
/* pixel clock with 2x oversampling */
rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000;
diff --git a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
deleted file mode 100644
index c2c6fab05eaa..000000000000
--- a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Copyright 2005-2009 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU Lesser General
- * Public License. You may obtain a copy of the GNU Lesser General
- * Public License Version 2.1 or later at the following locations:
- *
- * http://www.opensource.org/licenses/lgpl-license.html
- * http://www.gnu.org/copyleft/lgpl.html
- */
-
-#ifndef __DRM_IPU_H__
-#define __DRM_IPU_H__
-
-#include <linux/types.h>
-#include <linux/videodev2.h>
-#include <linux/bitmap.h>
-#include <linux/fb.h>
-
-struct ipu_soc;
-
-enum ipuv3_type {
- IPUV3EX,
- IPUV3M,
- IPUV3H,
-};
-
-#define IPU_PIX_FMT_GBR24 v4l2_fourcc('G', 'B', 'R', '3')
-
-/*
- * Bitfield of Display Interface signal polarities.
- */
-struct ipu_di_signal_cfg {
- unsigned datamask_en:1;
- unsigned interlaced:1;
- unsigned odd_field_first:1;
- unsigned clksel_en:1;
- unsigned clkidle_en:1;
- unsigned data_pol:1; /* true = inverted */
- unsigned clk_pol:1; /* true = rising edge */
- unsigned enable_pol:1;
- unsigned Hsync_pol:1; /* true = active high */
- unsigned Vsync_pol:1;
-
- u16 width;
- u16 height;
- u32 pixel_fmt;
- u16 h_start_width;
- u16 h_sync_width;
- u16 h_end_width;
- u16 v_start_width;
- u16 v_sync_width;
- u16 v_end_width;
- u32 v_to_h_sync;
- unsigned long pixelclock;
-#define IPU_DI_CLKMODE_SYNC (1 << 0)
-#define IPU_DI_CLKMODE_EXT (1 << 1)
- unsigned long clkflags;
-
- u8 hsync_pin;
- u8 vsync_pin;
-};
-
-enum ipu_color_space {
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_YUV,
- IPUV3_COLORSPACE_UNKNOWN,
-};
-
-struct ipuv3_channel;
-
-enum ipu_channel_irq {
- IPU_IRQ_EOF = 0,
- IPU_IRQ_NFACK = 64,
- IPU_IRQ_NFB4EOF = 128,
- IPU_IRQ_EOS = 192,
-};
-
-int ipu_map_irq(struct ipu_soc *ipu, int irq);
-int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
- enum ipu_channel_irq irq);
-
-#define IPU_IRQ_DP_SF_START (448 + 2)
-#define IPU_IRQ_DP_SF_END (448 + 3)
-#define IPU_IRQ_BG_SF_END IPU_IRQ_DP_SF_END,
-#define IPU_IRQ_DC_FC_0 (448 + 8)
-#define IPU_IRQ_DC_FC_1 (448 + 9)
-#define IPU_IRQ_DC_FC_2 (448 + 10)
-#define IPU_IRQ_DC_FC_3 (448 + 11)
-#define IPU_IRQ_DC_FC_4 (448 + 12)
-#define IPU_IRQ_DC_FC_6 (448 + 13)
-#define IPU_IRQ_VSYNC_PRE_0 (448 + 14)
-#define IPU_IRQ_VSYNC_PRE_1 (448 + 15)
-
-/*
- * IPU Image DMA Controller (idmac) functions
- */
-struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned channel);
-void ipu_idmac_put(struct ipuv3_channel *);
-
-int ipu_idmac_enable_channel(struct ipuv3_channel *channel);
-int ipu_idmac_disable_channel(struct ipuv3_channel *channel);
-int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms);
-
-void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
- bool doublebuffer);
-void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
-
-/*
- * IPU Display Controller (dc) functions
- */
-struct ipu_dc;
-struct ipu_di;
-struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel);
-void ipu_dc_put(struct ipu_dc *dc);
-int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
- u32 pixel_fmt, u32 width);
-void ipu_dc_enable(struct ipu_soc *ipu);
-void ipu_dc_enable_channel(struct ipu_dc *dc);
-void ipu_dc_disable_channel(struct ipu_dc *dc);
-void ipu_dc_disable(struct ipu_soc *ipu);
-
-/*
- * IPU Display Interface (di) functions
- */
-struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp);
-void ipu_di_put(struct ipu_di *);
-int ipu_di_disable(struct ipu_di *);
-int ipu_di_enable(struct ipu_di *);
-int ipu_di_get_num(struct ipu_di *);
-int ipu_di_init_sync_panel(struct ipu_di *, struct ipu_di_signal_cfg *sig);
-
-/*
- * IPU Display Multi FIFO Controller (dmfc) functions
- */
-struct dmfc_channel;
-int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc);
-void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
- unsigned long bandwidth_mbs, int burstsize);
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width);
-struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
-void ipu_dmfc_put(struct dmfc_channel *dmfc);
-
-/*
- * IPU Display Processor (dp) functions
- */
-#define IPU_DP_FLOW_SYNC_BG 0
-#define IPU_DP_FLOW_SYNC_FG 1
-#define IPU_DP_FLOW_ASYNC0_BG 2
-#define IPU_DP_FLOW_ASYNC0_FG 3
-#define IPU_DP_FLOW_ASYNC1_BG 4
-#define IPU_DP_FLOW_ASYNC1_FG 5
-
-struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow);
-void ipu_dp_put(struct ipu_dp *);
-int ipu_dp_enable(struct ipu_soc *ipu);
-int ipu_dp_enable_channel(struct ipu_dp *dp);
-void ipu_dp_disable_channel(struct ipu_dp *dp);
-void ipu_dp_disable(struct ipu_soc *ipu);
-int ipu_dp_setup_channel(struct ipu_dp *dp,
- enum ipu_color_space in, enum ipu_color_space out);
-int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos);
-int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
- bool bg_chan);
-
-#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
-
-#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
-#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22)
-#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4)
-#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1)
-#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1)
-#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14)
-#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14)
-
-#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10)
-#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9)
-#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13)
-#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12)
-#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1)
-#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1)
-#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12)
-#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11)
-#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10)
-#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7)
-#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10)
-#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1)
-#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1)
-#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7)
-#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1)
-#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1)
-#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3)
-#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2)
-#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1)
-#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
-#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
-#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
-#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
-#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
-#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
-#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1)
-#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1)
-#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13)
-#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12)
-#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29)
-#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29)
-#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20)
-#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7)
-#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4)
-#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1)
-#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3)
-#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2)
-#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7)
-#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14)
-#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3)
-#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3)
-#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3)
-#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3)
-#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 128, 5)
-#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5)
-#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5)
-#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5)
-#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1)
-#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1)
-#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1)
-
-struct ipu_cpmem_word {
- u32 data[5];
- u32 res[3];
-};
-
-struct ipu_ch_param {
- struct ipu_cpmem_word word[2];
-};
-
-void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v);
-u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs);
-struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel);
-void ipu_ch_param_dump(struct ipu_ch_param __iomem *p);
-
-static inline void ipu_ch_param_zero(struct ipu_ch_param __iomem *p)
-{
- int i;
- void __iomem *base = p;
-
- for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
- writel(0, base + i * sizeof(u32));
-}
-
-static inline void ipu_cpmem_set_buffer(struct ipu_ch_param __iomem *p,
- int bufnum, dma_addr_t buf)
-{
- if (bufnum)
- ipu_ch_param_write_field(p, IPU_FIELD_EBA1, buf >> 3);
- else
- ipu_ch_param_write_field(p, IPU_FIELD_EBA0, buf >> 3);
-}
-
-static inline void ipu_cpmem_set_resolution(struct ipu_ch_param __iomem *p,
- int xres, int yres)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_FW, xres - 1);
- ipu_ch_param_write_field(p, IPU_FIELD_FH, yres - 1);
-}
-
-static inline void ipu_cpmem_set_stride(struct ipu_ch_param __iomem *p,
- int stride)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_SLY, stride - 1);
-}
-
-void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel);
-
-struct ipu_rgb {
- struct fb_bitfield red;
- struct fb_bitfield green;
- struct fb_bitfield blue;
- struct fb_bitfield transp;
- int bits_per_pixel;
-};
-
-struct ipu_image {
- struct v4l2_pix_format pix;
- struct v4l2_rect rect;
- dma_addr_t phys;
-};
-
-int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
- int width);
-
-int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *,
- const struct ipu_rgb *rgb);
-
-static inline void ipu_cpmem_interlaced_scan(struct ipu_ch_param *p,
- int stride)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_SO, 1);
- ipu_ch_param_write_field(p, IPU_FIELD_ILO, stride / 8);
- ipu_ch_param_write_field(p, IPU_FIELD_SLY, (stride * 2) - 1);
-};
-
-void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
- int stride, int height);
-void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
- u32 pixel_format);
-void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
- u32 pixel_format, int stride, int u_offset, int v_offset);
-int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat);
-int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
- struct ipu_image *image);
-
-enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
-enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
-
-static inline void ipu_cpmem_set_burstsize(struct ipu_ch_param __iomem *p,
- int burstsize)
-{
- ipu_ch_param_write_field(p, IPU_FIELD_NPB, burstsize - 1);
-};
-
-struct ipu_client_platformdata {
- int di;
- int dc;
- int dp;
- int dmfc;
- int dma[2];
-};
-
-#endif /* __DRM_IPU_H__ */
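Note: this header is not dropped but relocated to include/video/imx-ipu-v3.h as part of moving the IPUv3 core driver out of staging, which is why the consumers in the surrounding hunks switch to the <video/imx-ipu-v3.h> include path. A minimal consumer sketch using only prototypes from the header deleted above (whether ipu_idmac_get() reports failure via ERR_PTR or NULL is an assumption; ERR_PTR is assumed below):

	#include <linux/err.h>
	#include <video/imx-ipu-v3.h>

	/* Sketch: acquire an IDMAC channel and enable it. */
	static int example_enable_channel(struct ipu_soc *ipu, unsigned chan)
	{
		struct ipuv3_channel *ch = ipu_idmac_get(ipu, chan);

		if (IS_ERR(ch))
			return PTR_ERR(ch);

		return ipu_idmac_enable_channel(ch);
	}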
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 47bec5e17358..720868bff35b 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -30,7 +30,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include "ipu-v3/imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index 5697e59ddf1d..6f393a11f44d 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -17,7 +17,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include "ipu-v3/imx-ipu-v3.h"
+#include "video/imx-ipu-v3.h"
#include "ipuv3-plane.h"
#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index eaf4dda1a0c4..4ca61afdf622 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -148,7 +148,6 @@ static struct drm_connector_funcs imx_pd_connector_funcs = {
static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.get_modes = imx_pd_connector_get_modes,
.best_encoder = imx_pd_connector_best_encoder,
- .mode_valid = imx_drm_connector_mode_valid,
};
static struct drm_encoder_funcs imx_pd_encoder_funcs = {
@@ -174,6 +173,13 @@ static int imx_pd_register(struct drm_device *drm,
if (ret)
return ret;
+ /* set the connector's dpms to OFF so that
+ * drm_helper_connector_dpms() won't return
+ * immediately since the current state is ON
+ * at this point.
+ */
+ imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
+
drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE);
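Note: the added DPMS initialization matters because the DPMS helper short-circuits when the requested mode equals the connector's cached state, and a freshly allocated connector reads as DPMS_ON. Roughly (paraphrased, not a verbatim quote of the helper):

	void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
	{
		if (mode == connector->dpms)
			return;	/* no-op -- hence priming dpms to OFF above */
		/* ... propagate the new mode to the encoder and crtc ... */
	}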
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index 827209ea6bd0..386a36c00f57 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -82,16 +82,7 @@ struct ccc_io {
/**
* I/O vector information to or from which read/write is going.
*/
- struct iovec *cui_iov;
- unsigned long cui_nrsegs;
- /**
- * Total iov count for left IO.
- */
- unsigned long cui_tot_nrsegs;
- /**
- * Old length for iov that was truncated partially.
- */
- size_t cui_iov_olen;
+ struct iov_iter *cui_iter;
/**
* Total size for the left IO.
*/
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index dc24cfa58037..1b0c216bc568 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -720,31 +720,12 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
void ccc_io_update_iov(const struct lu_env *env,
struct ccc_io *cio, struct cl_io *io)
{
- int i;
size_t size = io->u.ci_rw.crw_count;
- cio->cui_iov_olen = 0;
- if (!cl_is_normalio(env, io) || cio->cui_tot_nrsegs == 0)
+ if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
return;
- for (i = 0; i < cio->cui_tot_nrsegs; i++) {
- struct iovec *iv = &cio->cui_iov[i];
-
- if (iv->iov_len < size)
- size -= iv->iov_len;
- else {
- if (iv->iov_len > size) {
- cio->cui_iov_olen = iv->iov_len;
- iv->iov_len = size;
- }
- break;
- }
- }
-
- cio->cui_nrsegs = i + 1;
- LASSERTF(cio->cui_tot_nrsegs >= cio->cui_nrsegs,
- "tot_nrsegs: %lu, nrsegs: %lu\n",
- cio->cui_tot_nrsegs, cio->cui_nrsegs);
+ iov_iter_truncate(cio->cui_iter, size);
}
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
@@ -775,30 +756,7 @@ void ccc_io_advance(const struct lu_env *env,
if (!cl_is_normalio(env, io))
return;
- LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
- LASSERT(cio->cui_tot_count >= nob);
-
- cio->cui_iov += cio->cui_nrsegs;
- cio->cui_tot_nrsegs -= cio->cui_nrsegs;
- cio->cui_tot_count -= nob;
-
- /* update the iov */
- if (cio->cui_iov_olen > 0) {
- struct iovec *iv;
-
- cio->cui_iov--;
- cio->cui_tot_nrsegs++;
- iv = &cio->cui_iov[0];
- if (io->ci_continue) {
- iv->iov_base += iv->iov_len;
- LASSERT(cio->cui_iov_olen > iv->iov_len);
- iv->iov_len = cio->cui_iov_olen - iv->iov_len;
- } else {
- /* restore the iov_len, in case of restart io. */
- iv->iov_len = cio->cui_iov_olen;
- }
- cio->cui_iov_olen = 0;
- }
+ iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
}
/**
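Note: the iovec bookkeeping removed above (cui_iov/cui_nrsegs/cui_iov_olen) is subsumed by two iov_iter primitives: iov_iter_truncate() caps the iterator at what one pass may move, and iov_iter_reexpand() restores the remainder afterwards. A minimal sketch of that lifecycle (do_one_pass() is a hypothetical stand-in for the real submission path, assumed to advance the iterator by the bytes it returns):

	#include <linux/uio.h>

	/* Sketch: run one bounded pass over an iov_iter, then restore it. */
	static ssize_t bounded_pass(struct iov_iter *iter, size_t budget)
	{
		size_t total = iov_iter_count(iter);
		ssize_t done;

		iov_iter_truncate(iter, budget);	/* shrink the visible window */
		done = do_one_pass(iter);		/* consumes from iter */
		iov_iter_reexpand(iter, total - (done > 0 ? done : 0));
		return done;
	}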
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index c4ddec2b3589..716e1ee0104f 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1114,9 +1114,7 @@ restart:
switch (vio->cui_io_subtype) {
case IO_NORMAL:
- cio->cui_iov = args->u.normal.via_iov;
- cio->cui_nrsegs = args->u.normal.via_nrsegs;
- cio->cui_tot_nrsegs = cio->cui_nrsegs;
+ cio->cui_iter = args->u.normal.via_iter;
cio->cui_iocb = args->u.normal.via_iocb;
if ((iot == CIT_WRITE) &&
!(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
@@ -1180,58 +1178,23 @@ out:
return result;
}
-static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct lu_env *env;
struct vvp_io_args *args;
- size_t count = 0;
ssize_t result;
int refcheck;
- result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
- if (result)
- return result;
-
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
args = vvp_env_args(env, IO_NORMAL);
- args->u.normal.via_iov = (struct iovec *)iov;
- args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
- &iocb->ki_pos, count);
- cl_env_put(env, &refcheck);
- return result;
-}
-
-static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
- loff_t *ppos)
-{
- struct lu_env *env;
- struct iovec *local_iov;
- struct kiocb *kiocb;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- local_iov = &vvp_env_info(env)->vti_local_iov;
- kiocb = &vvp_env_info(env)->vti_kiocb;
- local_iov->iov_base = (void __user *)buf;
- local_iov->iov_len = count;
- init_sync_kiocb(kiocb, file);
- kiocb->ki_pos = *ppos;
- kiocb->ki_nbytes = count;
-
- result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
- *ppos = kiocb->ki_pos;
-
+ &iocb->ki_pos, iov_iter_count(to));
cl_env_put(env, &refcheck);
return result;
}
@@ -1239,64 +1202,27 @@ static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
/*
* Write to a file (through the page cache).
*/
-static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct lu_env *env;
struct vvp_io_args *args;
- size_t count = 0;
ssize_t result;
int refcheck;
- result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
- if (result)
- return result;
-
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
args = vvp_env_args(env, IO_NORMAL);
- args->u.normal.via_iov = (struct iovec *)iov;
- args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, count);
+ &iocb->ki_pos, iov_iter_count(from));
cl_env_put(env, &refcheck);
return result;
}
-static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
- loff_t *ppos)
-{
- struct lu_env *env;
- struct iovec *local_iov;
- struct kiocb *kiocb;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- local_iov = &vvp_env_info(env)->vti_local_iov;
- kiocb = &vvp_env_info(env)->vti_kiocb;
- local_iov->iov_base = (void __user *)buf;
- local_iov->iov_len = count;
- init_sync_kiocb(kiocb, file);
- kiocb->ki_pos = *ppos;
- kiocb->ki_nbytes = count;
-
- result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
- *ppos = kiocb->ki_pos;
-
- cl_env_put(env, &refcheck);
- return result;
-}
-
-
-
/*
* Send file content (through pagecache) somewhere with helper
*/
@@ -3143,10 +3069,10 @@ int ll_inode_permission(struct inode *inode, int mask)
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
- .read = ll_file_read,
- .aio_read = ll_file_aio_read,
- .write = ll_file_write,
- .aio_write = ll_file_aio_write,
+ .read = new_sync_read,
+ .read_iter = ll_file_read_iter,
+ .write = new_sync_write,
+ .write_iter = ll_file_write_iter,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -3158,10 +3084,10 @@ struct file_operations ll_file_operations = {
};
struct file_operations ll_file_operations_flock = {
- .read = ll_file_read,
- .aio_read = ll_file_aio_read,
- .write = ll_file_write,
- .aio_write = ll_file_aio_write,
+ .read = new_sync_read,
+ .read_iter = ll_file_read_iter,
+ .write = new_sync_write,
+ .write_iter = ll_file_write_iter,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
@@ -3176,10 +3102,10 @@ struct file_operations ll_file_operations_flock = {
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
- .read = ll_file_read,
- .aio_read = ll_file_aio_read,
- .write = ll_file_write,
- .aio_write = ll_file_aio_write,
+ .read = new_sync_read,
+ .read_iter = ll_file_read_iter,
+ .write = new_sync_write,
+ .write_iter = ll_file_write_iter,
.unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
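Note: the three file_operations tables now route read(2)/write(2) through new_sync_read()/new_sync_write(), which wrap the user buffer in a single-segment iov_iter and call ->read_iter/->write_iter, leaving one IO path instead of four. Minimal sketch of the 3.16 idiom (example_read_iter is a hypothetical placeholder):

	#include <linux/fs.h>
	#include <linux/uio.h>

	static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		/* ... fill 'to' from iocb->ki_pos, return bytes produced ... */
		return 0;
	}

	static const struct file_operations example_fops = {
		.read		= new_sync_read,	/* read(2) -> ->read_iter */
		.read_iter	= example_read_iter,
		.llseek		= generic_file_llseek,
	};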
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index dde7632ba01f..140ee947ba49 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -917,8 +917,7 @@ struct vvp_io_args {
union {
struct {
struct kiocb *via_iocb;
- struct iovec *via_iov;
- unsigned long via_nrsegs;
+ struct iov_iter *via_iter;
} normal;
struct {
struct pipe_inode_info *via_pipe;
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index f0122c568a09..56162103cc79 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -151,8 +151,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
if (result == 0) {
cio->cui_fd = LUSTRE_FPRIVATE(file);
- cio->cui_iov = NULL;
- cio->cui_nrsegs = 0;
+ cio->cui_iter = NULL;
result = cl_io_iter_init(env, io);
if (result == 0) {
result = cl_io_lock(env, io);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 55ca8d3c3e46..af84c1aaa5f8 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -218,14 +218,11 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
int i;
for (i = 0; i < npages; i++) {
- if (pages[i] == NULL)
- break;
if (do_dirty)
set_page_dirty_lock(pages[i]);
page_cache_release(pages[i]);
}
-
- OBD_FREE_LARGE(pages, npages * sizeof(*pages));
+ kvfree(pages);
}
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -363,18 +360,16 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
~(DT_MAX_BRW_SIZE - 1))
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t file_offset,
- unsigned long nr_segs)
+ struct iov_iter *iter, loff_t file_offset)
{
struct lu_env *env;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ccc_object *obj = cl_inode2ccc(inode);
- long count = iov_length(iov, nr_segs);
- long tot_bytes = 0, result = 0;
+ ssize_t count = iov_iter_count(iter);
+ ssize_t tot_bytes = 0, result = 0;
struct ll_inode_info *lli = ll_i2info(inode);
- unsigned long seg = 0;
long size = MAX_DIO_SIZE;
int refcheck;
@@ -392,11 +387,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
/* Check that all user buffers are aligned as well */
- for (seg = 0; seg < nr_segs; seg++) {
- if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) ||
- (iov[seg].iov_len & ~CFS_PAGE_MASK))
- return -EINVAL;
- }
+ if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
+ return -EINVAL;
env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
@@ -411,63 +403,49 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
mutex_lock(&inode->i_mutex);
LASSERT(obj->cob_transient_pages == 0);
- for (seg = 0; seg < nr_segs; seg++) {
- long iov_left = iov[seg].iov_len;
- unsigned long user_addr = (unsigned long)iov[seg].iov_base;
+ while (iov_iter_count(iter)) {
+ struct page **pages;
+ size_t offs;
+ count = min_t(size_t, iov_iter_count(iter), size);
if (rw == READ) {
if (file_offset >= i_size_read(inode))
break;
- if (file_offset + iov_left > i_size_read(inode))
- iov_left = i_size_read(inode) - file_offset;
+ if (file_offset + count > i_size_read(inode))
+ count = i_size_read(inode) - file_offset;
}
- while (iov_left > 0) {
- struct page **pages;
- int page_count, max_pages = 0;
- long bytes;
-
- bytes = min(size, iov_left);
- page_count = ll_get_user_pages(rw, user_addr, bytes,
- &pages, &max_pages);
- if (likely(page_count > 0)) {
- if (unlikely(page_count < max_pages))
- bytes = page_count << PAGE_CACHE_SHIFT;
- result = ll_direct_IO_26_seg(env, io, rw, inode,
- file->f_mapping,
- bytes, file_offset,
- pages, page_count);
- ll_free_user_pages(pages, max_pages, rw==READ);
- } else if (page_count == 0) {
- GOTO(out, result = -EFAULT);
- } else {
- result = page_count;
- }
- if (unlikely(result <= 0)) {
- /* If we can't allocate a large enough buffer
- * for the request, shrink it to a smaller
- * PAGE_SIZE multiple and try again.
- * We should always be able to kmalloc for a
- * page worth of page pointers = 4MB on i386. */
- if (result == -ENOMEM &&
- size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
- PAGE_CACHE_SIZE) {
- size = ((((size / 2) - 1) |
- ~CFS_PAGE_MASK) + 1) &
- CFS_PAGE_MASK;
- CDEBUG(D_VFSTRACE,"DIO size now %lu\n",
- size);
- continue;
- }
-
- GOTO(out, result);
+ result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
+ if (likely(result > 0)) {
+ int n = (result + offs + PAGE_SIZE - 1) / PAGE_SIZE;
+ result = ll_direct_IO_26_seg(env, io, rw, inode,
+ file->f_mapping,
+ result, file_offset,
+ pages, n);
+ ll_free_user_pages(pages, n, rw==READ);
+ }
+ if (unlikely(result <= 0)) {
+ /* If we can't allocate a large enough buffer
+ * for the request, shrink it to a smaller
+ * PAGE_SIZE multiple and try again.
+ * We should always be able to kmalloc for a
+ * page worth of page pointers = 4MB on i386. */
+ if (result == -ENOMEM &&
+ size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
+ PAGE_CACHE_SIZE) {
+ size = ((((size / 2) - 1) |
+ ~CFS_PAGE_MASK) + 1) &
+ CFS_PAGE_MASK;
+ CDEBUG(D_VFSTRACE,"DIO size now %lu\n",
+ size);
+ continue;
}
- tot_bytes += result;
- file_offset += result;
- iov_left -= result;
- user_addr += result;
+ GOTO(out, result);
}
+ iov_iter_advance(iter, result);
+ tot_bytes += result;
+ file_offset += result;
}
out:
LASSERT(obj->cob_transient_pages == 0);
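Note: iov_iter_get_pages_alloc() replaces the hand-rolled ll_get_user_pages() path: it pins up to a caller-supplied number of bytes, allocates the page array itself (hence the kvfree() in ll_free_user_pages() above), and returns the offset of the data within the first page. The canonical consumption loop looks like this sketch (submit_pages() is a hypothetical stand-in):

	#include <linux/kernel.h>
	#include <linux/pagemap.h>
	#include <linux/uio.h>

	/* Sketch: pin, process, advance, release -- one chunk per iteration. */
	static ssize_t dio_loop(struct iov_iter *iter, size_t max_chunk)
	{
		ssize_t tot = 0;

		while (iov_iter_count(iter)) {
			struct page **pages;
			size_t start;
			ssize_t bytes;
			int n;

			bytes = iov_iter_get_pages_alloc(iter, &pages,
							 max_chunk, &start);
			if (bytes <= 0)
				return tot ? tot : bytes;

			n = DIV_ROUND_UP(bytes + start, PAGE_SIZE);
			/* ... submit_pages(pages, n, start, bytes) ... */
			iov_iter_advance(iter, bytes);
			tot += bytes;

			while (n--)
				page_cache_release(pages[n]);
			kvfree(pages);
		}
		return tot;
	}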
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 7dd2b4723c5f..0e0b404cb5e6 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -211,27 +211,26 @@ static int vvp_mmap_locks(const struct lu_env *env,
struct cl_lock_descr *descr = &cti->cti_descr;
ldlm_policy_data_t policy;
unsigned long addr;
- unsigned long seg;
ssize_t count;
int result;
+ struct iov_iter i;
+ struct iovec iov;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
if (!cl_is_normalio(env, io))
return 0;
- if (vio->cui_iov == NULL) /* nfs or loop back device write */
+ if (vio->cui_iter == NULL) /* nfs or loop back device write */
return 0;
/* No MM (e.g. NFS)? No vmas too. */
if (mm == NULL)
return 0;
- for (seg = 0; seg < vio->cui_nrsegs; seg++) {
- const struct iovec *iv = &vio->cui_iov[seg];
-
- addr = (unsigned long)iv->iov_base;
- count = iv->iov_len;
+ iov_for_each(iov, i, *(vio->cui_iter)) {
+ addr = (unsigned long)iov.iov_base;
+ count = iov.iov_len;
if (count == 0)
continue;
@@ -527,9 +526,7 @@ static int vvp_io_read_start(const struct lu_env *env,
switch (vio->cui_io_subtype) {
case IO_NORMAL:
LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_aio_read(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
@@ -595,12 +592,11 @@ static int vvp_io_write_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
+ if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
result = 0;
else
- result = generic_file_aio_write(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
+
if (result > 0) {
if (result < cnt)
io->ci_continue = 0;
@@ -1162,10 +1158,9 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
* results." -- Single Unix Spec */
if (count == 0)
result = 1;
- else {
+ else
cio->cui_tot_count = count;
- cio->cui_tot_nrsegs = 0;
- }
+
/* for read/write, we store the jobid in the inode, and
* it'll be fetched by osc when building RPC.
*
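Note: iov_for_each() (used in the vvp_mmap_locks() hunk above) walks the iovec segments of an iterator on a private copy, leaving the caller's iov_iter untouched -- which is what lets the open-coded cui_iov/cui_nrsegs walk go away. Usage sketch mirroring that hunk:

	#include <linux/uio.h>

	/* Sketch: visit every user segment described by 'iter'. */
	static void visit_segments(const struct iov_iter *iter)
	{
		struct iov_iter i;
		struct iovec iov;

		iov_for_each(iov, i, *iter) {
			unsigned long addr = (unsigned long)iov.iov_base;
			size_t len = iov.iov_len;

			if (!len)
				continue;
			/* ... inspect [addr, addr + len) ... */
		}
	}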
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index ded31ea6bd39..cbf455d66f73 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -396,7 +396,7 @@ static void iss_video_buf_queue(struct vb2_buffer *vb)
}
}
-static struct vb2_ops iss_video_vb2ops = {
+static const struct vb2_ops iss_video_vb2ops = {
.queue_setup = iss_video_queue_setup,
.buf_prepare = iss_video_buf_prepare,
.buf_queue = iss_video_buf_queue,
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 75d7c63cb413..e320d6bae913 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -1067,7 +1067,7 @@ static int xlr_net_probe(struct platform_device *pdev)
xlr_set_rx_mode(ndev);
priv->num_rx_desc += MAX_NUM_DESC_SPILL;
- SET_ETHTOOL_OPS(ndev, &xlr_ethtool_ops);
+ ndev->ethtool_ops = &xlr_ethtool_ops;
SET_NETDEV_DEV(ndev, &pdev->dev);
/* Common registers, do one time initialization */
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ff7214aac9dd..da9dd6bc5660 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -469,7 +469,7 @@ int cvm_oct_common_init(struct net_device *dev)
/* We do our own locking, Linux doesn't need to */
dev->features |= NETIF_F_LLTX;
- SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+ dev->ethtool_ops = &cvm_oct_ethtool_ops;
cvm_oct_phy_setup_device(dev);
cvm_oct_set_mac_filter(dev);
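Note: this hunk and the netlogic one above track the 3.16 removal of the SET_ETHTOOL_OPS() wrapper macro; struct net_device's ethtool_ops pointer now points to const and is assigned directly:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link = ethtool_op_get_link,
	};

	static void example_setup(struct net_device *ndev)
	{
		/* was: SET_ETHTOOL_OPS(ndev, &example_ethtool_ops); */
		ndev->ethtool_ops = &example_ethtool_ops;
	}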
diff --git a/drivers/staging/rtl8192ee/core.c b/drivers/staging/rtl8192ee/core.c
index 76ea356163b6..7f6accd59986 100644
--- a/drivers/staging/rtl8192ee/core.c
+++ b/drivers/staging/rtl8192ee/core.c
@@ -322,7 +322,7 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
struct rtl_mac *mac = &(rtlpriv->mac80211);
struct cfg80211_pkt_pattern *patterns = wow->patterns;
struct rtl_wow_pattern rtl_pattern;
- u8 *pattern_os, *mask_os;
+ const u8 *pattern_os, *mask_os;
u8 mask[MAX_WOL_BIT_MASK_SIZE] = {0};
u8 content[MAX_WOL_PATTERN_SIZE] = {0};
u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -1561,7 +1561,7 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
* before switch channle or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw,
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index 0acacab95a48..46f5abcbaeeb 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -298,7 +298,7 @@ int rtl8723a_FirmwareDownload(struct rtw_adapter *padapter)
RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
if (IS_8723A_A_CUT(pHalData->VersionID)) {
- fw_name = "rtlwifi/rtl8723aufw.bin";
+ fw_name = "rtlwifi/rtl8723aufw_A.bin";
RT_TRACE(_module_hal_init_c_, _drv_info_,
("rtl8723a_FirmwareDownload: R8723FwImageArray_UMC "
"for RTL8723A A CUT\n"));
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 0c9f5cebfb42..f0839f6a9345 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -1227,7 +1227,7 @@ static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
static int cfg80211_rtw_get_station(struct wiphy *wiphy,
struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
int ret = 0;
struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
@@ -2903,7 +2903,7 @@ static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
}
static int cfg80211_rtw_add_station(struct wiphy *wiphy,
- struct net_device *ndev, u8 *mac,
+ struct net_device *ndev, const u8 *mac,
struct station_parameters *params)
{
DBG_8723A("%s(%s)\n", __func__, ndev->name);
@@ -2912,7 +2912,7 @@ static int cfg80211_rtw_add_station(struct wiphy *wiphy,
}
static int cfg80211_rtw_del_station(struct wiphy *wiphy,
- struct net_device *ndev, u8 *mac)
+ struct net_device *ndev, const u8 *mac)
{
int ret = 0;
struct list_head *phead, *plist, *ptmp;
@@ -2988,7 +2988,7 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy,
}
static int cfg80211_rtw_change_station(struct wiphy *wiphy,
- struct net_device *ndev, u8 *mac,
+ struct net_device *ndev, const u8 *mac,
struct station_parameters *params)
{
DBG_8723A("%s(%s)\n", __func__, ndev->name);
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 4e32003a4437..1fb34386a4e5 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -29,7 +29,9 @@ MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
MODULE_VERSION(DRIVERVERSION);
-MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
/* module param defaults */
static int rtw_chip_version = 0x00;
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
index 9a37408708f4..046be2ce9c1a 100644
--- a/drivers/staging/rtl8821ae/core.c
+++ b/drivers/staging/rtl8821ae/core.c
@@ -1278,7 +1278,9 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
* before switch channel or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 8945b4e3a2a6..cb50120ed7b5 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -280,8 +280,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
/* Wait until the state has moved to ON */
- while (*pdata->dsp_prm_read(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST)&
- OMAP_INTRANSITION_MASK);
+ while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+ OMAP2_PM_PWSTST) &
+ OMAP_INTRANSITION_MASK)
+ ;
/* Disable Automatic transition */
(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
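Note: besides calling dsp_prm_read through an explicit (*fn)() dereference to match the dsp_cm_write call below it, the rewrite moves the empty loop body onto its own line so the busy-wait is unmistakable (the old trailing semicolon read like an accident). A more defensive variant would bound the spin; the iteration cap below is an invented example, not from this driver:

	int timeout = 1000;

	/* Wait for the power domain to leave the transition state. */
	while (((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
		OMAP_INTRANSITION_MASK) && --timeout)
		cpu_relax();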
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index f76f95c29617..723319ee08f3 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -84,7 +84,7 @@ static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
}
static int prism2_domibset_pstr32(wlandevice_t *wlandev,
- u32 did, u8 len, u8 *data)
+ u32 did, u8 len, const u8 *data)
{
struct p80211msg_dot11req_mibset msg;
p80211item_pstr32_t *mibitem =
@@ -298,7 +298,7 @@ static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
wlandevice_t *wlandev = dev->ml_priv;
struct p80211msg_lnxreq_commsquality quality;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9189bc0a87ae..1f4c794f5fcc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -300,7 +300,7 @@ bool iscsit_check_np_match(
port = ntohs(sock_in->sin_port);
}
- if ((ip_match == true) && (np->np_port == port) &&
+ if (ip_match && (np->np_port == port) &&
(np->np_network_transport == network_transport))
return true;
@@ -325,7 +325,7 @@ static struct iscsi_np *iscsit_get_np(
}
match = iscsit_check_np_match(sockaddr, np, network_transport);
- if (match == true) {
+ if (match) {
/*
* Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called
@@ -1121,7 +1121,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
*/
- if (dump_payload == true)
+ if (dump_payload)
goto after_immediate_data;
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
@@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
if (cmd->data_direction != DMA_TO_DEVICE) {
pr_err("Command ITT: 0x%08x received DataOUT for a"
" NON-WRITE command.\n", cmd->init_task_tag);
- return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
}
se_cmd = &cmd->se_cmd;
iscsit_mod_dataout_timer(cmd);
@@ -3390,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
#define SENDTARGETS_BUF_LIMIT 32768U
-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+static int
+iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+ enum iscsit_transport_type network_transport)
{
char *payload = NULL;
struct iscsi_conn *conn = cmd->conn;
@@ -3467,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np);
+ if (np->np_network_transport != network_transport)
+ continue;
+
if (!target_name_printed) {
len = sprintf(buf, "TargetName=%s",
tiqn->tiqn);
@@ -3485,10 +3490,8 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
len = sprintf(buf, "TargetAddress="
"%s:%hu,%hu",
- (inaddr_any == false) ?
- np->np_ip : conn->local_ip,
- (inaddr_any == false) ?
- np->np_port : conn->local_port,
+ inaddr_any ? conn->local_ip : np->np_ip,
+ inaddr_any ? conn->local_port : np->np_port,
tpg->tpgt);
len += 1;
@@ -3520,11 +3523,12 @@ eob:
int
iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
- struct iscsi_text_rsp *hdr)
+ struct iscsi_text_rsp *hdr,
+ enum iscsit_transport_type network_transport)
{
int text_length, padding;
- text_length = iscsit_build_sendtargets_response(cmd);
+ text_length = iscsit_build_sendtargets_response(cmd, network_transport);
if (text_length < 0)
return text_length;
@@ -3562,7 +3566,7 @@ static int iscsit_send_text_rsp(
u32 tx_size = 0;
int text_length, iov_count = 0, rc;
- rc = iscsit_build_text_rsp(cmd, conn, hdr);
+ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
if (rc < 0)
return rc;
@@ -4234,8 +4238,6 @@ int iscsit_close_connection(
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
- iscsit_free_queue_reqs_for_conn(conn);
-
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@@ -4252,6 +4254,7 @@ int iscsit_close_connection(
iscsit_clear_ooo_cmdsns_for_conn(conn);
iscsit_release_commands_from_conn(conn);
}
+ iscsit_free_queue_reqs_for_conn(conn);
/*
* Handle decrementing session or connection usage count if
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index de77d9aa22c6..ab4915c0d933 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -71,6 +71,40 @@ static void chap_gen_challenge(
challenge_asciihex);
}
+static int chap_check_algorithm(const char *a_str)
+{
+ char *tmp, *orig, *token;
+
+ tmp = kstrdup(a_str, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+ return CHAP_DIGEST_UNKNOWN;
+ }
+ orig = tmp;
+
+ token = strsep(&tmp, "=");
+ if (!token)
+ goto out;
+
+ if (strcmp(token, "CHAP_A")) {
+ pr_err("Unable to locate CHAP_A key\n");
+ goto out;
+ }
+ while (token) {
+ token = strsep(&tmp, ",");
+ if (!token)
+ goto out;
+
+ if (!strncmp(token, "5", 1)) {
+ pr_debug("Selected MD5 Algorithm\n");
+ kfree(orig);
+ return CHAP_DIGEST_MD5;
+ }
+ }
+out:
+ kfree(orig);
+ return CHAP_DIGEST_UNKNOWN;
+}
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
@@ -79,6 +113,7 @@ static struct iscsi_chap *chap_server_open(
char *aic_str,
unsigned int *aic_len)
{
+ int ret;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -93,21 +128,24 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- /*
- * We only support MD5 MDA presently.
- */
- if (strncmp(a_str, "CHAP_A=5", 8)) {
- pr_err("CHAP_A is not MD5.\n");
+ ret = chap_check_algorithm(a_str);
+ switch (ret) {
+ case CHAP_DIGEST_MD5:
+ pr_debug("[server] Got CHAP_A=5\n");
+ /*
+ * Send back CHAP_A set to MD5.
+ */
+ *aic_len = sprintf(aic_str, "CHAP_A=5");
+ *aic_len += 1;
+ chap->digest_type = CHAP_DIGEST_MD5;
+ pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ break;
+ case CHAP_DIGEST_UNKNOWN:
+ default:
+ pr_err("Unsupported CHAP_A value\n");
return NULL;
}
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
- *aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+
/*
* Set Identifier.
*/
@@ -136,7 +174,6 @@ static int chap_server_compute_md5(
char *nr_out_ptr,
unsigned int *nr_out_len)
{
- char *endptr;
unsigned long id;
unsigned char id_as_uchar;
unsigned char digest[MD5_SIGNATURE_SIZE];
@@ -282,9 +319,14 @@ static int chap_server_compute_md5(
}
if (type == HEX)
- id = simple_strtoul(&identifier[2], &endptr, 0);
+ ret = kstrtoul(&identifier[2], 0, &id);
else
- id = simple_strtoul(identifier, &endptr, 0);
+ ret = kstrtoul(identifier, 0, &id);
+
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+ goto out;
+ }
if (id > 255) {
pr_err("chap identifier: %lu greater than 255\n", id);
goto out;
@@ -313,6 +355,20 @@ static int chap_server_compute_md5(
pr_err("Unable to convert incoming challenge\n");
goto out;
}
+ if (challenge_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+ /*
+ * During mutual authentication, the CHAP_C generated by the
+ * initiator must not match the original CHAP_C generated by
+ * the target.
+ */
+ if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ pr_err("initiator CHAP_C matches target CHAP_C, failing"
+ " login attempt\n");
+ goto out;
+ }
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
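Note: chap_check_algorithm() above parses "CHAP_A=<v>,<v>,..." with strsep(): one split on '=' to verify the key, then repeated splits on ',' to scan the proposed digest list for MD5 ("5"). Because strsep() consumes its buffer pointer, the code works on a kstrdup() copy and frees the saved original. The same pattern, condensed with generic names:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Sketch: does "KEY=v1,v2,..." contain the value 'want'? */
	static bool key_list_contains(const char *list, const char *key,
				      const char *want)
	{
		char *dup, *orig, *tok;
		bool found = false;

		dup = kstrdup(list, GFP_KERNEL);
		if (!dup)
			return false;
		orig = dup;	/* strsep() advances 'dup'; free 'orig' */

		tok = strsep(&dup, "=");
		if (tok && !strcmp(tok, key)) {
			while ((tok = strsep(&dup, ",")) != NULL) {
				if (!strcmp(tok, want)) {
					found = true;
					break;
				}
			}
		}

		kfree(orig);
		return found;
	}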
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index 2f463c09626d..d22f7b96a06c 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,7 @@
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
+#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d9b1d88e1ad3..5e71ac609418 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1145,7 +1145,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
void iscsi_target_login_sess_out(struct iscsi_conn *conn,
struct iscsi_np *np, bool zero_tsih, bool new_sess)
{
- if (new_sess == false)
+ if (!new_sess)
goto old_sess_out;
pr_err("iSCSI Login negotiation failed.\n");
@@ -1216,7 +1216,7 @@ old_sess_out:
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
u8 *buffer, zero_tsih = 0;
- int ret = 0, rc, stop;
+ int ret = 0, rc;
struct iscsi_conn *conn = NULL;
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
@@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
complete(&np->np_restart_comp);
+ } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+ spin_unlock_bh(&np->np_thread_lock);
+ goto exit;
} else {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
}
@@ -1422,10 +1425,8 @@ old_sess_out:
}
out:
- stop = kthread_should_stop();
- /* Wait for another socket.. */
- if (!stop)
- return 1;
+ return 1;
+
exit:
iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
@@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
allow_signal(SIGINT);
- while (!kthread_should_stop()) {
+ while (1) {
ret = __iscsi_target_login_thread(np);
/*
* We break and exit here unless another sock_accept() call
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 75b685960e80..62a095f36bf2 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
}
rc = schedule_delayed_work(&conn->login_work, 0);
- if (rc == false) {
+ if (!rc) {
pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
" got false\n");
}
@@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
state = (tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&tpg->tpg_state_lock);
- if (state == false) {
+ if (!state) {
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
@@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
state = iscsi_target_sk_state_check(sk);
read_unlock_bh(&sk->sk_callback_lock);
- if (state == false) {
+ if (!state) {
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
@@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero(
}
goto do_auth;
+ } else if (!payload_length) {
+ pr_err("Initiator sent zero length security payload,"
+ " login failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
}
if (login->first_request)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 4d2e23fc76fd..02f9de26f38a 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -474,10 +474,10 @@ int iscsi_set_keys_to_negotiate(
if (!strcmp(param->name, AUTHMETHOD)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, HEADERDIGEST)) {
- if (iser == false)
+ if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATADIGEST)) {
- if (iser == false)
+ if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXCONNECTIONS)) {
SET_PSTATE_NEGOTIATE(param);
@@ -497,7 +497,7 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
- if (iser == false)
+ if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
continue;
@@ -528,13 +528,13 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, OFMARKINT)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
- if (iser == true)
+ if (iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
- if (iser == true)
+ if (iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
- if (iser == true)
+ if (iser)
SET_PSTATE_NEGOTIATE(param);
}
}
@@ -1605,7 +1605,7 @@ int iscsi_decode_text_input(
tmpbuf = kzalloc(length + 1, GFP_KERNEL);
if (!tmpbuf) {
- pr_err("Unable to allocate memory for tmpbuf.\n");
+ pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
return -1;
}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 1431e8400d28..c3cb5c15efda 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -189,7 +189,7 @@ static void iscsit_clear_tpg_np_login_thread(
iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
-void iscsit_clear_tpg_np_login_threads(
+static void iscsit_clear_tpg_np_login_threads(
struct iscsi_portal_group *tpg,
bool shutdown)
{
@@ -276,8 +276,6 @@ int iscsit_tpg_del_portal_group(
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
- iscsit_clear_tpg_np_login_threads(tpg, true);
-
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
pr_err("Unable to delete iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n",
@@ -453,7 +451,7 @@ static bool iscsit_tpg_check_network_portal(
match = iscsit_check_np_match(sockaddr, np,
network_transport);
- if (match == true)
+ if (match)
break;
}
spin_unlock(&tpg->tpg_np_lock);
@@ -475,7 +473,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
if (!tpg_np_parent) {
if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
- network_transport) == true) {
+ network_transport)) {
pr_err("Network Portal: %s already exists on a"
" different TPG on %s\n", ip_str,
tpg->tpg_tiqn->tiqn);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 0a182f2aa8a2..e7265337bc43 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
-extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool);
extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 53e157cb8c54..fd90b28f1d94 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
login->login_failed = 1;
iscsit_collect_login_stats(conn, status_class, status_detail);
+ memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
+
hdr = (struct iscsi_login_rsp *)&login->rsp[0];
hdr->opcode = ISCSI_OP_LOGIN_RSP;
hdr->status_class = status_class;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 73ab75ddaf42..8c64b8776a96 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
- u32 sgl_bidi_count = 0;
+ u32 sgl_bidi_count = 0, transfer_length;
int rc;
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
@@ -213,12 +213,21 @@ static void tcm_loop_submission_work(struct work_struct *work)
}
- if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+ transfer_length = scsi_transfer_length(sc);
+ if (!scsi_prot_sg_count(sc) &&
+ scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
se_cmd->prot_pto = true;
+ /*
+ * loopback transport doesn't support
+ * WRITE_GENERATE, READ_STRIP protection
+ * information operations, go ahead unprotected.
+ */
+ transfer_length = scsi_bufflen(sc);
+ }
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
- scsi_bufflen(sc), tcm_loop_sam_attr(sc),
+ transfer_length, tcm_loop_sam_attr(sc),
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
sgl_bidi, sgl_bidi_count,
@@ -230,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
return;
out_done:
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
sc->scsi_done(sc);
return;
}
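Note: scsi_transfer_length() (new in 3.16) reports the expected wire length, i.e. the data length plus any protection-interval bytes that travel with it. Since the loopback transport cannot generate or strip protection information itself, the unprotected WRITE_GENERATE/READ_STRIP cases fall back to the plain data length. The selection made above boils down to:

	u32 transfer_length = scsi_transfer_length(sc);	/* data + PI bytes */

	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;		/* PI is passed through */
		transfer_length = scsi_bufflen(sc);	/* data bytes only */
	}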
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 11d26fe65bfb..98da90167159 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -616,6 +616,7 @@ void core_dev_unexport(
dev->export_count--;
spin_unlock(&hba->device_lock);
+ lun->lun_sep = NULL;
lun->lun_se_dev = NULL;
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0f199f6a0738..94d00df28f39 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1055,6 +1055,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
+
+ blk_rq_set_block_pc(req);
} else {
BUG_ON(!cmd->data_length);
@@ -1071,7 +1073,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)
}
}
- req->cmd_type = REQ_TYPE_BLOCK_PC;
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
req->cmd_len = scsi_command_size(pt->pscsi_cdb);
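Note: blk_rq_set_block_pc() is the 3.16 replacement for open-coding req->cmd_type = REQ_TYPE_BLOCK_PC, and unlike the plain assignment it also initializes the BLOCK_PC-specific command/data fields, so it must run right after allocation -- which is why the call moves up from the tail of the function. Sketch of the required ordering (example_done is a hypothetical completion; the NULL check matches 3.16's blk_get_request(), which returns NULL on failure):

	#include <linux/blkdev.h>

	struct request *req;

	req = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	blk_rq_set_block_pc(req);	/* before cmd/sense/map setup */

	req->end_io = example_done;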
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index e0229592ec55..bd78d9235ac6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, GOOD, 8);
return 0;
}
@@ -137,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, GOOD, 32);
return 0;
}
@@ -176,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
return cmd->se_dev->dev_attrib.block_size * sectors;
}
-static int sbc_check_valid_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned long long end_lba;
- u32 sectors;
-
- sectors = cmd->data_length / dev->dev_attrib.block_size;
- end_lba = dev->transport->get_blocks(dev) + 1;
-
- if (cmd->t_task_lba + sectors > end_lba) {
- pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
- cmd->t_task_lba, sectors, end_lba);
- return -EINVAL;
- }
-
- return 0;
-}
-
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
/*
@@ -665,8 +647,19 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
cmd->prot_type = dev->dev_attrib.pi_prot_type;
cmd->prot_length = dev->prot_length * sectors;
- pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
- __func__, cmd->prot_type, cmd->prot_length,
+
+ /*
+ * In case protection information exists over the wire,
+ * we modify the command data length to describe pure data.
+ * The actual transfer length is the data length plus the
+ * protection length.
+ */
+ if (protect)
+ cmd->data_length = sectors * dev->dev_attrib.block_size;
+
+ pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
+ "prot_op=%d prot_checks=%d\n",
+ __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
cmd->prot_op, cmd->prot_checks);
return true;
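
/*
 * Sketch of the data-length adjustment above: with protection enabled
 * the wire transfer carries both data and PI, so target-core rewrites
 * data_length to cover only the data portion and tracks the PI bytes
 * in prot_length. The 8-byte protection interval is the usual DIF
 * layout and an assumption of this illustration.
 */
#include <stdint.h>

static void split_transfer(uint32_t sectors, uint32_t block_size,
			   uint32_t *data_length, uint32_t *prot_length)
{
	*data_length = sectors * block_size;	/* pure data */
	*prot_length = sectors * 8;		/* protection information */
}
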
@@ -877,15 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
- if (!ops->execute_sync_cache) {
- size = 0;
- cmd->execute_cmd = sbc_emulate_noop;
- break;
- }
-
- /*
- * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
- */
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
@@ -893,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
-
- size = sbc_get_size(cmd, sectors);
-
- /*
- * Check to ensure that LBA + Range does not exceed past end of
- * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
- */
- if (cmd->t_task_lba || sectors) {
- if (sbc_check_valid_sectors(cmd) < 0)
- return TCM_ADDRESS_OUT_OF_RANGE;
+ if (ops->execute_sync_cache) {
+ cmd->execute_cmd = ops->execute_sync_cache;
+ goto check_lba;
}
- cmd->execute_cmd = ops->execute_sync_cache;
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
break;
case UNMAP:
if (!ops->execute_unmap)
@@ -947,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
break;
case VERIFY:
size = 0;
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->execute_cmd = sbc_emulate_noop;
- break;
+ goto check_lba;
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
@@ -988,7 +968,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
dev->dev_attrib.hw_max_sectors);
return TCM_INVALID_CDB_FIELD;
}
-
+check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
pr_err("cmd exceeds last lba %llu "
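
/*
 * Minimal sketch of the consolidated range check reached through the
 * new check_lba label: the device exposes blocks 0..get_blocks(), so
 * end_lba is get_blocks() + 1 and a request is valid only while
 * lba + sectors stays at or below it. Standalone illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static bool lba_in_range(uint64_t lba, uint32_t sectors, uint64_t last_block)
{
	uint64_t end_lba = last_block + 1;	/* one past the last LBA */

	return lba + sectors <= end_lba;
}
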
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 8653666612a8..6cd7222738fc 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -129,15 +129,10 @@ static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u16 len = 0;
+ u16 len;
if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
- u32 unit_serial_len;
-
- unit_serial_len = strlen(dev->t10_wwn.unit_serial);
- unit_serial_len++; /* For NULL Terminator */
-
- len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
+ len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -721,6 +716,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
unsigned char *buf;
sense_reason_t ret;
int p;
+ int len = 0;
buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
if (!buf) {
@@ -742,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
}
ret = spc_emulate_inquiry_std(cmd, buf);
+ len = buf[4] + 5;
goto out;
}
@@ -749,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
if (cdb[2] == evpd_handlers[p].page) {
buf[1] = cdb[2];
ret = evpd_handlers[p].emulate(cmd, buf);
+ len = get_unaligned_be16(&buf[2]) + 4;
goto out;
}
}
@@ -765,7 +763,7 @@ out:
kfree(buf);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, GOOD, len);
return ret;
}
@@ -1103,7 +1101,7 @@ set_length:
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, GOOD, length);
return 0;
}
@@ -1279,7 +1277,7 @@ done:
buf[3] = (lun_count & 0xff);
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
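
/*
 * Sketch of the response-length arithmetic used above so that
 * target_complete_cmd_with_length() can report accurate residuals.
 * Offsets follow SPC: standard INQUIRY carries ADDITIONAL LENGTH in
 * byte 4 (total = byte4 + 5), an EVPD page carries PAGE LENGTH in
 * bytes 2..3 (total = be16 + 4), and REPORT LUNS returns an 8-byte
 * header plus 8 bytes per LUN. Illustration only.
 */
#include <stdint.h>

static uint32_t inquiry_std_len(const uint8_t *buf)
{
	return buf[4] + 5;
}

static uint32_t inquiry_evpd_len(const uint8_t *buf)
{
	return (((uint32_t)buf[2] << 8) | buf[3]) + 4;	/* be16 + header */
}

static uint32_t report_luns_len(uint32_t lun_count)
{
	return 8 + lun_count * 8;
}
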
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2179feed0d63..7fa62fc93e0b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -504,7 +504,7 @@ void transport_deregister_session(struct se_session *se_sess)
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context.
*/
- if (se_nacl && comp_nacl == true)
+ if (se_nacl && comp_nacl)
target_put_nacl(se_nacl);
transport_free_session(se_sess);
@@ -562,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return 1;
}
@@ -687,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
if (cmd->transport_state & CMD_T_ABORTED &&
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return;
} else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
@@ -703,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
}
EXPORT_SYMBOL(target_complete_cmd);
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ cmd->residual_count += cmd->data_length - length;
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = cmd->data_length - length;
+ }
+
+ cmd->data_length = length;
+ }
+
+ target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
+
static void target_add_to_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1761,7 +1778,7 @@ void target_execute_cmd(struct se_cmd *cmd)
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return;
}
@@ -2363,7 +2380,7 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref == true) {
+ if (ack_kref) {
kref_get(&se_cmd->cmd_kref);
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
@@ -2407,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
+ if (!se_sess) {
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return 1;
+ }
return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
&se_sess->sess_cmd_lock);
}
@@ -2934,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->transport_state |= CMD_T_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
INIT_WORK(&cmd->work, target_tmr_work);
queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
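
/*
 * Standalone sketch of the residual bookkeeping performed by the new
 * target_complete_cmd_with_length() (the kernel version additionally
 * requires a GOOD SCSI status): when the emulated response is shorter
 * than the initiator's allocation length, the difference is reported
 * as an underflow residual and data_length is trimmed.
 */
#include <stdbool.h>
#include <stdint.h>

struct cmd_state {
	uint32_t data_length;		/* initiator allocation length */
	uint32_t residual_count;
	bool underflow;
};

static void complete_with_length(struct cmd_state *cmd, uint32_t length)
{
	if (length < cmd->data_length) {
		if (cmd->underflow) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->underflow = true;
			cmd->residual_count = cmd->data_length - length;
		}
		cmd->data_length = length;
	}
}
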
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 669c536fd959..e9186cdf35e9 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -70,7 +70,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
int rc;
- if (src == true)
+ if (src)
dev_wwn = &xop->dst_tid_wwn[0];
else
dev_wwn = &xop->src_tid_wwn[0];
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
if (rc != 0)
continue;
- if (src == true) {
+ if (src) {
xop->dst_dev = se_dev;
pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
" se_dev\n", xop->dst_dev);
@@ -166,7 +166,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
return -EINVAL;
}
- if (src == true) {
+ if (src) {
memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
* Determine if the source designator matches the local device
@@ -236,7 +236,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
/*
* Assume target descriptors are in source -> destination order..
*/
- if (src == true)
+ if (src)
src = false;
else
src = true;
@@ -560,7 +560,7 @@ static int target_xcopy_init_pt_lun(
* reservations. The pt_cmd->se_lun pointer will be setup from within
* target_xcopy_setup_pt_port()
*/
- if (remote_port == false) {
+ if (!remote_port) {
pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
return 0;
}
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index f5fd515b2bee..be0c0d08c56a 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -128,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
struct fc_lport *lport;
struct fc_exch *ep;
size_t len;
+ int rc;
if (cmd->aborted)
return 0;
@@ -137,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd)
len = sizeof(*fcp) + se_cmd->scsi_sense_length;
fp = fc_frame_alloc(lport, len);
if (!fp) {
- /* XXX shouldn't just drop it - requeue and retry? */
- return 0;
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ return -ENOMEM;
}
+
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = se_cmd->scsi_status;
@@ -170,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd)
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- lport->tt.seq_send(lport, cmd->seq, fp);
+ rc = lport->tt.seq_send(lport, cmd->seq, fp);
+ if (rc) {
+ pr_info_ratelimited("%s: Failed to send response frame %p, "
+ "xid <0x%x>\n", __func__, fp, ep->xid);
+ /*
+ * Generate a TASK_SET_FULL status to notify the initiator
+ * to reduce its queue_depth after the se_cmd response has
+ * been re-queued by target-core.
+ */
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ return -ENOMEM;
+ }
lport->tt.exch_done(cmd->seq);
return 0;
}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index e415af32115a..97b486c3dda1 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
if (cmd->aborted)
return 0;
+
+ if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+ goto queue_status;
+
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
@@ -178,14 +182,23 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
FC_TYPE_FCP, f_ctl, fh_off);
error = lport->tt.seq_send(lport, seq, fp);
if (error) {
- /* XXX For now, initiator will retry */
- pr_err_ratelimited("%s: Failed to send frame %p, "
+ pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
+ /*
+ * Go ahead and set TASK_SET_FULL status ignoring the
+ * rest of the DataIN, and immediately attempt to
+ * send the response via ft_queue_status() in order
+ * to notify the initiator that it should reduce its
+ * per-LUN queue_depth.
+ */
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ break;
}
}
+queue_status:
return ft_queue_status(se_cmd);
}
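
/*
 * Control-flow sketch (hypothetical names, illustration only) of the
 * TASK_SET_FULL handling added above: a DataIN send failure abandons
 * the remaining data and jumps straight to sending status; a status
 * send failure returns -ENOMEM so target-core requeues the response,
 * and the requeued attempt skips the DataIN phase entirely.
 */
#include <errno.h>

#define STAT_TASK_SET_FULL 0x28			/* SAM_STAT_TASK_SET_FULL */

static int send_status_sketch(int scsi_status)
{
	/* mirrors ft_queue_status(): -ENOMEM here triggers a requeue */
	return 0;
}

static int data_in_sketch(int *scsi_status, int send_failed)
{
	if (*scsi_status == STAT_TASK_SET_FULL)
		goto out_status;	/* requeued command: status only */

	if (send_failed)		/* abandon the rest of the DataIN */
		*scsi_status = STAT_TASK_SET_FULL;
out_status:
	return send_status_sketch(*scsi_status);
}
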
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index a8aaf6ac2ae2..946562389ca8 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -129,7 +129,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
tc_device_get_irq(tdev);
- device_register(&tdev->dev);
+ if (device_register(&tdev->dev)) {
+ put_device(&tdev->dev);
+ goto out_err;
+ }
list_add_tail(&tdev->node, &tbus->devices);
out_err:
@@ -148,7 +151,10 @@ static int __init tc_init(void)
INIT_LIST_HEAD(&tc_bus.devices);
dev_set_name(&tc_bus.dev, "tc");
- device_register(&tc_bus.dev);
+ if (device_register(&tc_bus.dev)) {
+ put_device(&tc_bus.dev);
+ return 0;
+ }
if (tc_bus.info.slot_size) {
unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2d51912a6e40..f9a13867cb70 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -222,12 +222,24 @@ config ACPI_INT3403_THERMAL
the Intel Thermal Daemon can use this information to allow the user
to select his laptop to run without turning on the fans.
+config INTEL_SOC_DTS_THERMAL
+ tristate "Intel SoCs DTS thermal driver"
+ depends on X86 && IOSF_MBI
+ help
+ Enable this to register the platform digital temperature sensor (DTS)
+ on Intel SoCs (e.g. Bay Trail). These SoCs have two DTSs in addition
+ to the DTSs on the CPU cores. Each DTS will be registered as a
+ thermal zone. There are two trip points. One of the trip points can
+ be set by user-mode programs to get notifications via Linux thermal
+ notification methods. The other trip is a critical trip point, which
+ is set by the driver based on the TJ MAX temperature.
+
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
menu "Samsung thermal drivers"
-depends on PLAT_SAMSUNG
+depends on ARCH_EXYNOS
source "drivers/thermal/samsung/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 54e4ec9eb5df..de0636a57a64 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
+obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 5e53212b984f..9d1420acb391 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -24,10 +24,7 @@
#include <linux/of_device.h>
#include <linux/thermal.h>
-#define THERMAL_VALID_OFFSET 9
#define THERMAL_VALID_MASK 0x1
-#define THERMAL_TEMP_OFFSET 10
-#define THERMAL_TEMP_MASK 0x1ff
/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
@@ -38,24 +35,47 @@
#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30)
#define PMU_TDC0_START_CAL_MASK (0x1 << 25)
-struct armada_thermal_ops;
+#define A375_Z1_CAL_RESET_LSB 0x8011e214
+#define A375_Z1_CAL_RESET_MSB 0x30a88019
+#define A375_Z1_WORKAROUND_BIT BIT(9)
+
+#define A375_UNIT_CONTROL_SHIFT 27
+#define A375_UNIT_CONTROL_MASK 0x7
+#define A375_READOUT_INVERT BIT(15)
+#define A375_HW_RESETn BIT(8)
+#define A380_HW_RESET BIT(8)
+
+struct armada_thermal_data;
/* Marvell EBU Thermal Sensor Dev Structure */
struct armada_thermal_priv {
void __iomem *sensor;
void __iomem *control;
- struct armada_thermal_ops *ops;
+ struct armada_thermal_data *data;
};
-struct armada_thermal_ops {
+struct armada_thermal_data {
/* Initialize the sensor */
- void (*init_sensor)(struct armada_thermal_priv *);
+ void (*init_sensor)(struct platform_device *pdev,
+ struct armada_thermal_priv *);
/* Test for a valid sensor value (optional) */
bool (*is_valid)(struct armada_thermal_priv *);
+
+ /* Formula coefficients: temp = (b - m * reg) / div, or (m * reg - b) / div when inverted */
+ unsigned long coef_b;
+ unsigned long coef_m;
+ unsigned long coef_div;
+ bool inverted;
+
+ /* Register shift and mask to access the sensor temperature */
+ unsigned int temp_shift;
+ unsigned int temp_mask;
+ unsigned int is_valid_shift;
};
-static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
+static void armadaxp_init_sensor(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
unsigned long reg;
@@ -80,7 +100,8 @@ static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
writel(reg, priv->sensor);
}
-static void armada370_init_sensor(struct armada_thermal_priv *priv)
+static void armada370_init_sensor(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
unsigned long reg;
@@ -99,11 +120,54 @@ static void armada370_init_sensor(struct armada_thermal_priv *priv)
mdelay(10);
}
+static void armada375_init_sensor(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ unsigned long reg;
+ bool quirk_needed =
+ !!of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada375-z1-thermal");
+
+ if (quirk_needed) {
+ /* Ensure these registers have the default (reset) values */
+ writel(A375_Z1_CAL_RESET_LSB, priv->control);
+ writel(A375_Z1_CAL_RESET_MSB, priv->control + 0x4);
+ }
+
+ reg = readl(priv->control + 4);
+ reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
+ reg &= ~A375_READOUT_INVERT;
+ reg &= ~A375_HW_RESETn;
+
+ if (quirk_needed)
+ reg |= A375_Z1_WORKAROUND_BIT;
+
+ writel(reg, priv->control + 4);
+ mdelay(20);
+
+ reg |= A375_HW_RESETn;
+ writel(reg, priv->control + 4);
+ mdelay(50);
+}
+
+static void armada380_init_sensor(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ unsigned long reg = readl_relaxed(priv->control);
+
+ /* Reset hardware once */
+ if (!(reg & A380_HW_RESET)) {
+ reg |= A380_HW_RESET;
+ writel(reg, priv->control);
+ mdelay(10);
+ }
+}
+
static bool armada_is_valid(struct armada_thermal_priv *priv)
{
unsigned long reg = readl_relaxed(priv->sensor);
- return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK;
+ return (reg >> priv->data->is_valid_shift) & THERMAL_VALID_MASK;
}
static int armada_get_temp(struct thermal_zone_device *thermal,
@@ -111,17 +175,27 @@ static int armada_get_temp(struct thermal_zone_device *thermal,
{
struct armada_thermal_priv *priv = thermal->devdata;
unsigned long reg;
+ unsigned long m, b, div;
/* Valid check */
- if (priv->ops->is_valid && !priv->ops->is_valid(priv)) {
+ if (priv->data->is_valid && !priv->data->is_valid(priv)) {
dev_err(&thermal->device,
"Temperature sensor reading not valid\n");
return -EIO;
}
reg = readl_relaxed(priv->sensor);
- reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK;
- *temp = (3153000000UL - (10000000UL*reg)) / 13825;
+ reg = (reg >> priv->data->temp_shift) & priv->data->temp_mask;
+
+ /* Get formula coefficients */
+ b = priv->data->coef_b;
+ m = priv->data->coef_m;
+ div = priv->data->coef_div;
+
+ if (priv->data->inverted)
+ *temp = ((m * reg) - b) / div;
+ else
+ *temp = (b - (m * reg)) / div;
return 0;
}
@@ -129,23 +203,69 @@ static struct thermal_zone_device_ops ops = {
.get_temp = armada_get_temp,
};
-static const struct armada_thermal_ops armadaxp_ops = {
+static const struct armada_thermal_data armadaxp_data = {
.init_sensor = armadaxp_init_sensor,
+ .temp_shift = 10,
+ .temp_mask = 0x1ff,
+ .coef_b = 3153000000UL,
+ .coef_m = 10000000UL,
+ .coef_div = 13825,
};
-static const struct armada_thermal_ops armada370_ops = {
+static const struct armada_thermal_data armada370_data = {
.is_valid = armada_is_valid,
.init_sensor = armada370_init_sensor,
+ .is_valid_shift = 9,
+ .temp_shift = 10,
+ .temp_mask = 0x1ff,
+ .coef_b = 3153000000UL,
+ .coef_m = 10000000UL,
+ .coef_div = 13825,
+};
+
+static const struct armada_thermal_data armada375_data = {
+ .is_valid = armada_is_valid,
+ .init_sensor = armada375_init_sensor,
+ .is_valid_shift = 10,
+ .temp_shift = 0,
+ .temp_mask = 0x1ff,
+ .coef_b = 3171900000UL,
+ .coef_m = 10000000UL,
+ .coef_div = 13616,
+};
+
+static const struct armada_thermal_data armada380_data = {
+ .is_valid = armada_is_valid,
+ .init_sensor = armada380_init_sensor,
+ .is_valid_shift = 10,
+ .temp_shift = 0,
+ .temp_mask = 0x3ff,
+ .coef_b = 1169498786UL,
+ .coef_m = 2000000UL,
+ .coef_div = 4289,
+ .inverted = true,
};
static const struct of_device_id armada_thermal_id_table[] = {
{
.compatible = "marvell,armadaxp-thermal",
- .data = &armadaxp_ops,
+ .data = &armadaxp_data,
},
{
.compatible = "marvell,armada370-thermal",
- .data = &armada370_ops,
+ .data = &armada370_data,
+ },
+ {
+ .compatible = "marvell,armada375-thermal",
+ .data = &armada375_data,
+ },
+ {
+ .compatible = "marvell,armada375-z1-thermal",
+ .data = &armada375_data,
+ },
+ {
+ .compatible = "marvell,armada380-thermal",
+ .data = &armada380_data,
},
{
/* sentinel */
@@ -178,8 +298,8 @@ static int armada_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->control))
return PTR_ERR(priv->control);
- priv->ops = (struct armada_thermal_ops *)match->data;
- priv->ops->init_sensor(priv);
+ priv->data = (struct armada_thermal_data *)match->data;
+ priv->data->init_sensor(pdev, priv);
thermal = thermal_zone_device_register("armada_thermal", 0, 0,
priv, &ops, NULL, 0, 0);
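
/*
 * Worked example of the coefficient-based conversion introduced above.
 * With the Armada XP coefficients (b = 3153000000, m = 10000000,
 * div = 13825) a raw reading of 200 gives
 * (3153000000 - 10000000 * 200) / 13825 = 83399 millidegrees C.
 * Standalone sketch; the driver reads 'reg' from the sensor register.
 */
#include <stdio.h>

int main(void)
{
	unsigned long b = 3153000000UL, m = 10000000UL, div = 13825;
	unsigned long reg = 200;	/* sample raw sensor value */
	int inverted = 0;		/* Armada 380 would set this */
	unsigned long temp_mc;

	temp_mc = inverted ? ((m * reg) - b) / div : (b - (m * reg)) / div;
	printf("temperature: %lu mC\n", temp_mc);	/* 83399 mC */
	return 0;
}
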
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a99c63152b8d..2c516f2eebed 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
{
struct imx_thermal_data *data = platform_get_drvdata(pdev);
struct regmap *map;
- int t1, t2, n1, n2;
+ int t1, n1;
int ret;
u32 val;
u64 temp64;
@@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev)
/*
* Sensor data layout:
* [31:20] - sensor value @ 25C
- * [19:8] - sensor value of hot
- * [7:0] - hot temperature value
* Use universal formula now and only need sensor value @ 25C
* slope = 0.4297157 - (0.0015976 * 25C fuse)
*/
n1 = val >> 20;
- n2 = (val & 0xfff00) >> 8;
- t2 = val & 0xff;
t1 = 25; /* t1 always 25C */
/*
@@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev)
data->c2 = n1 * data->c1 + 1000 * t1;
/*
- * Set the default passive cooling trip point to 20 °C below the
- * maximum die temperature. Can be changed from userspace.
+ * Set the default passive cooling trip point;
+ * it can be changed from userspace.
*/
- data->temp_passive = 1000 * (t2 - 20);
+ data->temp_passive = IMX_TEMP_PASSIVE;
/*
- * The maximum die temperature is t2, let's give 5 °C cushion
- * for noise and possible temperature rise between measurements.
+ * The maximum die temperature is set to 20 C higher than
+ * IMX_TEMP_PASSIVE.
*/
- data->temp_critical = 1000 * (t2 - 5);
+ data->temp_critical = 1000 * 20 + data->temp_passive;
return 0;
}
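
/*
 * Sketch of the fixed trip-point derivation above: the passive trip is
 * now the constant IMX_TEMP_PASSIVE (85000 mC is assumed here as the
 * driver default) and the critical trip sits a fixed 20 C above it,
 * instead of both being derived from the dropped hot-point fuse data.
 */
#include <stdio.h>

#define IMX_TEMP_PASSIVE 85000	/* millicelsius; assumed driver default */

int main(void)
{
	long temp_passive = IMX_TEMP_PASSIVE;
	long temp_critical = 1000 * 20 + temp_passive;	/* 105000 mC */

	printf("passive=%ld mC critical=%ld mC\n", temp_passive, temp_critical);
	return 0;
}
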
diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c
index 1301681d9a77..e93f0253f6ed 100644
--- a/drivers/thermal/int3403_thermal.c
+++ b/drivers/thermal/int3403_thermal.c
@@ -62,7 +62,13 @@ static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
if (ACPI_FAILURE(status))
return -EIO;
- *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+ /*
+ * Thermal hysteresis represents a temperature difference.
+ * Kelvin and Celsius have the same degree size, so the
+ * conversion here from tenths of a degree Kelvin to
+ * milli-Celsius is just a multiplication by 100.
+ */
+ *temp = hyst * 100;
return 0;
}
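
/*
 * Worked example of the hysteresis conversion above: the firmware
 * reports hysteresis in tenths of a degree Kelvin, and because a
 * hysteresis is a temperature *difference* (Kelvin and Celsius degrees
 * are the same size) no 273.15 offset applies -- only the unit scaling
 * from tenths of a degree to millidegrees, i.e. a factor of 100.
 */
#include <stdio.h>

int main(void)
{
	unsigned long hyst = 20;		/* 2.0 degrees, in deci-Kelvin */
	unsigned long temp_mc = hyst * 100;	/* 2000 millidegrees Celsius */

	printf("hysteresis: %lu mC\n", temp_mc);
	return 0;
}
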
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index a084325f1386..95cb7fc20e17 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -681,8 +681,10 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2d},
{ X86_VENDOR_INTEL, 6, 0x2e},
{ X86_VENDOR_INTEL, 6, 0x2f},
+ { X86_VENDOR_INTEL, 6, 0x37},
{ X86_VENDOR_INTEL, 6, 0x3a},
{ X86_VENDOR_INTEL, 6, 0x3c},
+ { X86_VENDOR_INTEL, 6, 0x3d},
{ X86_VENDOR_INTEL, 6, 0x3e},
{ X86_VENDOR_INTEL, 6, 0x3f},
{ X86_VENDOR_INTEL, 6, 0x45},
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
new file mode 100644
index 000000000000..a6a0a18ec0aa
--- /dev/null
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -0,0 +1,479 @@
+/*
+ * intel_soc_dts_thermal.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/thermal.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+
+#define SOC_DTS_OFFSET_ENABLE 0xB0
+#define SOC_DTS_OFFSET_TEMP 0xB1
+
+#define SOC_DTS_OFFSET_PTPS 0xB2
+#define SOC_DTS_OFFSET_PTTS 0xB3
+#define SOC_DTS_OFFSET_PTTSS 0xB4
+#define SOC_DTS_OFFSET_PTMC 0x80
+#define SOC_DTS_TE_AUX0 0xB5
+#define SOC_DTS_TE_AUX1 0xB6
+
+#define SOC_DTS_AUX0_ENABLE_BIT BIT(0)
+#define SOC_DTS_AUX1_ENABLE_BIT BIT(1)
+#define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16)
+#define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17)
+#define SOC_DTS_TE_SCI_ENABLE BIT(9)
+#define SOC_DTS_TE_SMI_ENABLE BIT(10)
+#define SOC_DTS_TE_MSI_ENABLE BIT(11)
+#define SOC_DTS_TE_APICA_ENABLE BIT(14)
+#define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4)
+
+/* DTS encoding for TJ MAX temperature */
+#define SOC_DTS_TJMAX_ENCODING 0x7F
+
+/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
+#define BYT_SOC_DTS_APIC_IRQ 86
+
+/* Only 2 out of 4 are allowed for OSPM */
+#define SOC_MAX_DTS_TRIPS 2
+
+/* Mask for two trips in status bits */
+#define SOC_DTS_TRIP_MASK 0x03
+
+/* DTS0 and DTS1 */
+#define SOC_MAX_DTS_SENSORS 2
+
+#define CRITICAL_OFFSET_FROM_TJ_MAX 5000
+
+struct soc_sensor_entry {
+ int id;
+ u32 tj_max;
+ u32 temp_mask;
+ u32 temp_shift;
+ u32 store_status;
+ struct thermal_zone_device *tzone;
+};
+
+static struct soc_sensor_entry *soc_dts[SOC_MAX_DTS_SENSORS];
+
+static int crit_offset = CRITICAL_OFFSET_FROM_TJ_MAX;
+module_param(crit_offset, int, 0644);
+MODULE_PARM_DESC(crit_offset,
+ "Critical temperature offset from TJ MAX, in millidegrees Celsius.");
+
+static DEFINE_MUTEX(aux_update_mutex);
+static spinlock_t intr_notify_lock;
+static int soc_dts_thres_irq;
+
+static int get_tj_max(u32 *tj_max)
+{
+ u32 eax, edx;
+ u32 val;
+ int err;
+
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err)
+ goto err_ret;
+
+ val = (eax >> 16) & 0xff;
+ if (val) {
+ *tj_max = val * 1000;
+ } else {
+ err = -EINVAL;
+ goto err_ret;
+ }
+
+ return 0;
+err_ret:
+ *tj_max = 0;
+
+ return err;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzd,
+ int trip, unsigned long *temp)
+{
+ int status;
+ u32 out;
+ struct soc_sensor_entry *aux_entry;
+
+ aux_entry = tzd->devdata;
+
+ if (!trip) {
+ /* Just return the critical temp */
+ *temp = aux_entry->tj_max - crit_offset;
+ return 0;
+ }
+
+ mutex_lock(&aux_update_mutex);
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTPS, &out);
+ mutex_unlock(&aux_update_mutex);
+ if (status)
+ return status;
+
+ out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
+
+ if (!out)
+ *temp = 0;
+ else
+ *temp = aux_entry->tj_max - out * 1000;
+
+ return 0;
+}
+
+static int update_trip_temp(struct soc_sensor_entry *aux_entry,
+ int thres_index, unsigned long temp)
+{
+ int status;
+ u32 temp_out;
+ u32 out;
+ u32 store_ptps;
+ u32 store_ptmc;
+ u32 store_te_out;
+ u32 te_out;
+
+ u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE |
+ SOC_DTS_TE_MSI_ENABLE;
+
+ temp_out = (aux_entry->tj_max - temp) / 1000;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTPS, &store_ptps);
+ if (status)
+ return status;
+
+ out = (store_ptps & ~(0xFF << (thres_index * 8)));
+ out |= (temp_out & 0xFF) << (thres_index * 8);
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTPS, out);
+ if (status)
+ return status;
+ pr_debug("update_trip_temp PTPS = %x\n", out);
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTMC, &out);
+ if (status)
+ goto err_restore_ptps;
+
+ store_ptmc = out;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_TE_AUX0 + thres_index,
+ &te_out);
+ if (status)
+ goto err_restore_ptmc;
+
+ store_te_out = te_out;
+
+ /* Enable for CPU module 0 and module 1 */
+ out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT |
+ SOC_DTS_CPU_MODULE1_ENABLE_BIT);
+ if (temp) {
+ if (thres_index)
+ out |= SOC_DTS_AUX1_ENABLE_BIT;
+ else
+ out |= SOC_DTS_AUX0_ENABLE_BIT;
+ te_out |= int_enable_bit;
+ } else {
+ if (thres_index)
+ out &= ~SOC_DTS_AUX1_ENABLE_BIT;
+ else
+ out &= ~SOC_DTS_AUX0_ENABLE_BIT;
+ te_out &= ~int_enable_bit;
+ }
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, out);
+ if (status)
+ goto err_restore_te_out;
+
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_TE_AUX0 + thres_index,
+ te_out);
+ if (status)
+ goto err_restore_te_out;
+
+ return 0;
+
+err_restore_te_out:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, store_te_out);
+err_restore_ptmc:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, store_ptmc);
+err_restore_ptps:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTPS, store_ptps);
+ /* Nothing we can do if restore fails */
+
+ return status;
+}
+
+static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+ unsigned long temp)
+{
+ struct soc_sensor_entry *aux_entry = tzd->devdata;
+ int status;
+
+ if (temp > (aux_entry->tj_max - crit_offset))
+ return -EINVAL;
+
+ mutex_lock(&aux_update_mutex);
+ status = update_trip_temp(tzd->devdata, trip, temp);
+ mutex_unlock(&aux_update_mutex);
+
+ return status;
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ if (trip)
+ *type = THERMAL_TRIP_PASSIVE;
+ else
+ *type = THERMAL_TRIP_CRITICAL;
+
+ return 0;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd,
+ unsigned long *temp)
+{
+ int status;
+ u32 out;
+ struct soc_sensor_entry *aux_entry;
+
+ aux_entry = tzd->devdata;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_TEMP, &out);
+ if (status)
+ return status;
+
+ out = (out & aux_entry->temp_mask) >> aux_entry->temp_shift;
+ out -= SOC_DTS_TJMAX_ENCODING;
+ *temp = aux_entry->tj_max - out * 1000;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+};
+
+static void free_soc_dts(struct soc_sensor_entry *aux_entry)
+{
+ if (aux_entry) {
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_ENABLE, aux_entry->store_status);
+ thermal_zone_device_unregister(aux_entry->tzone);
+ kfree(aux_entry);
+ }
+}
+
+static int soc_dts_enable(int id)
+{
+ u32 out;
+ int ret;
+
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (!(out & BIT(id))) {
+ out |= BIT(id);
+ ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_ENABLE, out);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
+{
+ struct soc_sensor_entry *aux_entry;
+ char name[10];
+ int err;
+
+ aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
+ if (!aux_entry)
+ return ERR_PTR(-ENOMEM);
+
+ /* Store status to restore on exit */
+ err = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_ENABLE,
+ &aux_entry->store_status);
+ if (err)
+ goto err_ret;
+
+ aux_entry->id = id;
+ aux_entry->tj_max = tj_max;
+ aux_entry->temp_mask = 0x00FF << (id * 8);
+ aux_entry->temp_shift = id * 8;
+ snprintf(name, sizeof(name), "soc_dts%d", id);
+ aux_entry->tzone = thermal_zone_device_register(name,
+ SOC_MAX_DTS_TRIPS,
+ 0x02,
+ aux_entry, &tzone_ops, NULL, 0, 0);
+ if (IS_ERR(aux_entry->tzone)) {
+ err = PTR_ERR(aux_entry->tzone);
+ goto err_ret;
+ }
+
+ err = soc_dts_enable(id);
+ if (err)
+ goto err_aux_status;
+
+ return aux_entry;
+
+err_aux_status:
+ thermal_zone_device_unregister(aux_entry->tzone);
+err_ret:
+ kfree(aux_entry);
+ return ERR_PTR(err);
+}
+
+static void proc_thermal_interrupt(void)
+{
+ u32 sticky_out;
+ int status;
+ u32 ptmc_out;
+
+ /* Clear APIC interrupt */
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTMC, &ptmc_out);
+
+ ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT;
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, ptmc_out);
+
+ /* Read status here */
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTTSS, &sticky_out);
+ pr_debug("status %d PTTSS %x\n", status, sticky_out);
+ if (sticky_out & SOC_DTS_TRIP_MASK) {
+ int i;
+ /* reset sticky bit */
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTTSS, sticky_out);
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ pr_debug("TZD update for zone %d\n", i);
+ thermal_zone_device_update(soc_dts[i]->tzone);
+ }
+ }
+}
+
+static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&intr_notify_lock, flags);
+ proc_thermal_interrupt();
+ spin_unlock_irqrestore(&intr_notify_lock, flags);
+ pr_debug("proc_thermal_interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static const struct x86_cpu_id soc_thermal_ids[] = {
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
+
+static int __init intel_soc_thermal_init(void)
+{
+ u32 tj_max;
+ int err = 0;
+ int i;
+ const struct x86_cpu_id *match_cpu;
+
+ match_cpu = x86_match_cpu(soc_thermal_ids);
+ if (!match_cpu)
+ return -ENODEV;
+
+ if (get_tj_max(&tj_max))
+ return -EINVAL;
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ soc_dts[i] = alloc_soc_dts(i, tj_max);
+ if (IS_ERR(soc_dts[i])) {
+ err = PTR_ERR(soc_dts[i]);
+ goto err_free;
+ }
+ }
+
+ spin_lock_init(&intr_notify_lock);
+
+ soc_dts_thres_irq = (int)match_cpu->driver_data;
+
+ err = request_threaded_irq(soc_dts_thres_irq, NULL,
+ soc_irq_thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "soc_dts", soc_dts);
+ if (err) {
+ pr_err("request_threaded_irq ret %d\n", err);
+ goto err_free;
+ }
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ err = update_trip_temp(soc_dts[i], 0, tj_max - crit_offset);
+ if (err)
+ goto err_trip_temp;
+ }
+
+ return 0;
+
+err_trip_temp:
+ i = SOC_MAX_DTS_SENSORS;
+ free_irq(soc_dts_thres_irq, soc_dts);
+err_free:
+ while (--i >= 0)
+ free_soc_dts(soc_dts[i]);
+
+ return err;
+}
+
+static void __exit intel_soc_thermal_exit(void)
+{
+ int i;
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
+ update_trip_temp(soc_dts[i], 0, 0);
+
+ free_irq(soc_dts_thres_irq, soc_dts);
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
+ free_soc_dts(soc_dts[i]);
+}
+
+module_init(intel_soc_thermal_init)
+module_exit(intel_soc_thermal_exit)
+
+MODULE_DESCRIPTION("Intel SoC DTS Thermal Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
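
/*
 * Worked example of the DTS temperature decoding used in
 * sys_get_curr_temp() above: each sensor's reading occupies one byte
 * lane of SOC_DTS_OFFSET_TEMP, is biased by the TJ MAX encoding 0x7F,
 * and is reported as an offset below tj_max. Values below are made up
 * for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tj_max = 90000;		/* from MSR, millidegrees */
	uint32_t raw = 0x00008d7f;		/* sample SOC_DTS_OFFSET_TEMP */
	int id = 1;				/* sensor DTS1 */
	uint32_t mask = 0x00FF << (id * 8), shift = id * 8;
	uint32_t out = (raw & mask) >> shift;	/* 0x8d = 141 */

	out -= 0x7F;				/* SOC_DTS_TJMAX_ENCODING */
	printf("temp: %u mC\n", tj_max - out * 1000);	/* 90000 - 14000 */
	return 0;
}
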
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 04b1be7fa018..4b2b999b7611 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
- tbp->min,
- tbp->max);
+ tbp->max,
+ tbp->min);
if (ret)
return ret;
}
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
}
i = 0;
- for_each_child_of_node(child, gchild)
+ for_each_child_of_node(child, gchild) {
ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
tz->trips, tz->ntrips);
if (ret)
goto free_tbps;
+ }
finish:
of_node_put(child);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 5a37940b02c9..8803e693fe68 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -374,10 +374,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
int idle = IDLE_INTERVAL;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
- if (!common) {
- dev_err(dev, "Could not allocate common\n");
+ if (!common)
return -ENOMEM;
- }
INIT_LIST_HEAD(&common->head);
spin_lock_init(&common->lock);
@@ -423,7 +421,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(dev, "Could not allocate priv\n");
ret = -ENOMEM;
goto error_unregister;
}
@@ -470,7 +467,7 @@ error_unregister:
rcar_thermal_irq_disable(priv);
}
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
@@ -488,7 +485,7 @@ static int rcar_thermal_remove(struct platform_device *pdev)
rcar_thermal_irq_disable(priv);
}
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return 0;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 0d96a510389f..d7ca9f49c9cb 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -41,12 +41,13 @@
* @id: identifier of the one instance of the TMU controller.
* @pdata: pointer to the tmu platform/configuration data
* @base: base address of the single instance of the TMU controller.
- * @base_common: base address of the common registers of the TMU controller.
+ * @base_second: base address of the common registers of the TMU controller.
* @irq: irq number of the TMU controller.
* @soc: id of the SOC type.
* @irq_work: pointer to the irq work structure.
* @lock: lock to implement synchronization.
* @clk: pointer to the clock structure.
+ * @clk_sec: pointer to the clock structure for accessing the base_second.
* @temp_error1: fused value of the first point trim.
* @temp_error2: fused value of the second point trim.
* @regulator: pointer to the TMU regulator structure.
@@ -56,12 +57,12 @@ struct exynos_tmu_data {
int id;
struct exynos_tmu_platform_data *pdata;
void __iomem *base;
- void __iomem *base_common;
+ void __iomem *base_second;
int irq;
enum soc_type soc;
struct work_struct irq_work;
struct mutex lock;
- struct clk *clk;
+ struct clk *clk, *clk_sec;
u8 temp_error1, temp_error2;
struct regulator *regulator;
struct thermal_sensor_conf *reg_conf;
@@ -152,6 +153,8 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
mutex_lock(&data->lock);
clk_enable(data->clk);
+ if (!IS_ERR(data->clk_sec))
+ clk_enable(data->clk_sec);
if (TMU_SUPPORTS(pdata, READY_STATUS)) {
status = readb(data->base + reg->tmu_status);
@@ -186,7 +189,12 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
}
} else {
- trim_info = readl(data->base + reg->triminfo_data);
+ /* On exynos5420 the triminfo register is in the shared space */
+ if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
+ trim_info = readl(data->base_second +
+ reg->triminfo_data);
+ else
+ trim_info = readl(data->base + reg->triminfo_data);
}
data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
@@ -225,6 +233,8 @@ skip_calib_data:
trigger_levs++;
}
+ rising_threshold = readl(data->base + reg->threshold_th0);
+
if (data->soc == SOC_ARCH_EXYNOS4210) {
/* Write temperature code for threshold */
threshold_code = temp_to_code(data, pdata->threshold);
@@ -238,7 +248,7 @@ skip_calib_data:
writeb(pdata->trigger_levels[i], data->base +
reg->threshold_th0 + i * sizeof(reg->threshold_th0));
- writel(reg->inten_rise_mask, data->base + reg->tmu_intclear);
+ writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
} else {
/* Write temperature code for rising and falling threshold */
for (i = 0;
@@ -249,6 +259,7 @@ skip_calib_data:
ret = threshold_code;
goto out;
}
+ rising_threshold &= ~(0xff << 8 * i);
rising_threshold |= threshold_code << 8 * i;
if (pdata->threshold_falling) {
threshold_code = temp_to_code(data,
@@ -265,8 +276,8 @@ skip_calib_data:
writel(falling_threshold,
data->base + reg->threshold_th1);
- writel((reg->inten_rise_mask << reg->inten_rise_shift) |
- (reg->inten_fall_mask << reg->inten_fall_shift),
+ writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
+ (reg->intclr_fall_mask << reg->intclr_fall_shift),
data->base + reg->tmu_intclear);
/* if last threshold limit is also present */
@@ -281,6 +292,7 @@ skip_calib_data:
}
if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
/* 1-4 level to be assigned in th0 reg */
+ rising_threshold &= ~(0xff << 8 * i);
rising_threshold |= threshold_code << 8 * i;
writel(rising_threshold,
data->base + reg->threshold_th0);
@@ -298,10 +310,12 @@ skip_calib_data:
}
/*Clear the PMIN in the common TMU register*/
if (reg->tmu_pmin && !data->id)
- writel(0, data->base_common + reg->tmu_pmin);
+ writel(0, data->base_second + reg->tmu_pmin);
out:
clk_disable(data->clk);
mutex_unlock(&data->lock);
+ if (!IS_ERR(data->clk_sec))
+ clk_disable(data->clk_sec);
return ret;
}
@@ -453,12 +467,16 @@ static void exynos_tmu_work(struct work_struct *work)
const struct exynos_tmu_registers *reg = pdata->registers;
unsigned int val_irq, val_type;
+ if (!IS_ERR(data->clk_sec))
+ clk_enable(data->clk_sec);
/* Find which sensor generated this interrupt */
if (reg->tmu_irqstatus) {
- val_type = readl(data->base_common + reg->tmu_irqstatus);
+ val_type = readl(data->base_second + reg->tmu_irqstatus);
if (!((val_type >> data->id) & 0x1))
goto out;
}
+ if (!IS_ERR(data->clk_sec))
+ clk_disable(data->clk_sec);
exynos_report_trigger(data->reg_conf);
mutex_lock(&data->lock);
@@ -499,6 +517,18 @@ static const struct of_device_id exynos_tmu_match[] = {
.data = (void *)EXYNOS5250_TMU_DRV_DATA,
},
{
+ .compatible = "samsung,exynos5260-tmu",
+ .data = (void *)EXYNOS5260_TMU_DRV_DATA,
+ },
+ {
+ .compatible = "samsung,exynos5420-tmu",
+ .data = (void *)EXYNOS5420_TMU_DRV_DATA,
+ },
+ {
+ .compatible = "samsung,exynos5420-tmu-ext-triminfo",
+ .data = (void *)EXYNOS5420_TMU_DRV_DATA,
+ },
+ {
.compatible = "samsung,exynos5440-tmu",
.data = (void *)EXYNOS5440_TMU_DRV_DATA,
},
@@ -580,7 +610,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
* Check if the TMU shares some registers and then try to map the
* memory of common registers.
*/
- if (!TMU_SUPPORTS(pdata, SHARED_MEMORY))
+ if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
return 0;
if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
@@ -588,9 +618,9 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return -ENODEV;
}
- data->base_common = devm_ioremap(&pdev->dev, res.start,
+ data->base_second = devm_ioremap(&pdev->dev, res.start,
resource_size(&res));
- if (!data->base_common) {
+ if (!data->base_second) {
dev_err(&pdev->dev, "Failed to ioremap memory\n");
return -ENOMEM;
}
@@ -607,10 +637,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+ if (!data)
return -ENOMEM;
- }
platform_set_drvdata(pdev, data);
mutex_init(&data->lock);
@@ -629,13 +657,31 @@ static int exynos_tmu_probe(struct platform_device *pdev)
return PTR_ERR(data->clk);
}
+ data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
+ if (IS_ERR(data->clk_sec)) {
+ if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
+ dev_err(&pdev->dev, "Failed to get triminfo clock\n");
+ return PTR_ERR(data->clk_sec);
+ }
+ } else {
+ ret = clk_prepare(data->clk_sec);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return ret;
+ }
+ }
+
ret = clk_prepare(data->clk);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ goto err_clk_sec;
+ }
if (pdata->type == SOC_ARCH_EXYNOS4210 ||
pdata->type == SOC_ARCH_EXYNOS4412 ||
pdata->type == SOC_ARCH_EXYNOS5250 ||
+ pdata->type == SOC_ARCH_EXYNOS5260 ||
+ pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
pdata->type == SOC_ARCH_EXYNOS5440)
data->soc = pdata->type;
else {
@@ -656,7 +702,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
sensor_conf = devm_kzalloc(&pdev->dev,
sizeof(struct thermal_sensor_conf), GFP_KERNEL);
if (!sensor_conf) {
- dev_err(&pdev->dev, "Failed to allocate registration struct\n");
ret = -ENOMEM;
goto err_clk;
}
@@ -704,6 +749,9 @@ static int exynos_tmu_probe(struct platform_device *pdev)
return 0;
err_clk:
clk_unprepare(data->clk);
+err_clk_sec:
+ if (!IS_ERR(data->clk_sec))
+ clk_unprepare(data->clk_sec);
return ret;
}
@@ -716,6 +764,8 @@ static int exynos_tmu_remove(struct platform_device *pdev)
exynos_unregister_thermal(data->reg_conf);
clk_unprepare(data->clk);
+ if (!IS_ERR(data->clk_sec))
+ clk_unprepare(data->clk_sec);
if (!IS_ERR(data->regulator))
regulator_disable(data->regulator);
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index 3fb65547e64c..edd08cf76729 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -43,6 +43,8 @@ enum soc_type {
SOC_ARCH_EXYNOS4210 = 1,
SOC_ARCH_EXYNOS4412,
SOC_ARCH_EXYNOS5250,
+ SOC_ARCH_EXYNOS5260,
+ SOC_ARCH_EXYNOS5420_TRIMINFO,
SOC_ARCH_EXYNOS5440,
};
@@ -60,7 +62,7 @@ enum soc_type {
* state(active/idle) can be checked.
* TMU_SUPPORT_EMUL_TIME - This features allows to set next temp emulation
* sample time.
- * TMU_SUPPORT_SHARED_MEMORY - This feature tells that the different TMU
+ * TMU_SUPPORT_ADDRESS_MULTIPLE - This feature tells that the different TMU
* sensors shares some common registers.
* TMU_SUPPORT - macro to compare the above features with the supplied.
*/
@@ -70,7 +72,7 @@ enum soc_type {
#define TMU_SUPPORT_FALLING_TRIP BIT(3)
#define TMU_SUPPORT_READY_STATUS BIT(4)
#define TMU_SUPPORT_EMUL_TIME BIT(5)
-#define TMU_SUPPORT_SHARED_MEMORY BIT(6)
+#define TMU_SUPPORT_ADDRESS_MULTIPLE BIT(6)
#define TMU_SUPPORTS(a, b) (a->features & TMU_SUPPORT_ ## b)
@@ -122,10 +124,6 @@ enum soc_type {
* @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
* @tmu_inten: register containing the different threshold interrupt
enable bits.
- * @inten_rise_shift: shift bits of all rising interrupt bits.
- * @inten_rise_mask: mask bits of all rising interrupt bits.
- * @inten_fall_shift: shift bits of all rising interrupt bits.
- * @inten_fall_mask: mask bits of all rising interrupt bits.
* @inten_rise0_shift: shift bits of rising 0 interrupt bits.
* @inten_rise1_shift: shift bits of rising 1 interrupt bits.
* @inten_rise2_shift: shift bits of rising 2 interrupt bits.
@@ -136,6 +134,10 @@ enum soc_type {
* @inten_fall3_shift: shift bits of falling 3 interrupt bits.
* @tmu_intstat: Register containing the interrupt status values.
* @tmu_intclear: Register for clearing the raised interrupt status.
+ * @intclr_fall_shift: shift bits for the falling interrupt clear bits.
+ * @intclr_rise_shift: shift bits of all rising interrupt bits.
+ * @intclr_rise_mask: mask bits of all rising interrupt bits.
+ * @intclr_fall_mask: mask bits of all falling interrupt bits.
* @emul_con: TMU emulation controller register.
* @emul_temp_shift: shift bits of emulation temperature.
* @emul_time_shift: shift bits of emulation time.
@@ -149,6 +151,7 @@ struct exynos_tmu_registers {
u32 triminfo_85_shift;
u32 triminfo_ctrl;
+ u32 triminfo_ctrl1;
u32 triminfo_reload_shift;
u32 tmu_ctrl;
@@ -191,10 +194,6 @@ struct exynos_tmu_registers {
u32 threshold_th3_l0_shift;
u32 tmu_inten;
- u32 inten_rise_shift;
- u32 inten_rise_mask;
- u32 inten_fall_shift;
- u32 inten_fall_mask;
u32 inten_rise0_shift;
u32 inten_rise1_shift;
u32 inten_rise2_shift;
@@ -207,6 +206,10 @@ struct exynos_tmu_registers {
u32 tmu_intstat;
u32 tmu_intclear;
+ u32 intclr_fall_shift;
+ u32 intclr_rise_shift;
+ u32 intclr_fall_mask;
+ u32 intclr_rise_mask;
u32 emul_con;
u32 emul_temp_shift;
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 476b768c633e..c1d81dcd7819 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -40,13 +40,13 @@ static const struct exynos_tmu_registers exynos4210_tmu_registers = {
.threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
.threshold_th0 = EXYNOS4210_TMU_REG_TRIG_LEVEL0,
.tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK,
.inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
.inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
.inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
.inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
.tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
.tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+ .intclr_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK,
};
struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
@@ -112,10 +112,6 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.threshold_th0 = EXYNOS_THD_TEMP_RISE,
.threshold_th1 = EXYNOS_THD_TEMP_FALL,
.tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
- .inten_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
- .inten_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
- .inten_fall_shift = EXYNOS_TMU_FALL_INT_SHIFT,
.inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
.inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
.inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
@@ -123,6 +119,10 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
.tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
.tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+ .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT,
+ .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
+ .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
+ .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
.emul_con = EXYNOS_EMUL_CON,
.emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
.emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
@@ -194,6 +194,197 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
};
#endif
+#if defined(CONFIG_SOC_EXYNOS5260)
+static const struct exynos_tmu_registers exynos5260_tmu_registers = {
+ .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+ .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+ .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+ .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+ .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+ .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+ .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+ .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+ .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+ .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+ .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+ .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+ .tmu_status = EXYNOS_TMU_REG_STATUS,
+ .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+ .threshold_th0 = EXYNOS_THD_TEMP_RISE,
+ .threshold_th1 = EXYNOS_THD_TEMP_FALL,
+ .tmu_inten = EXYNOS5260_TMU_REG_INTEN,
+ .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+ .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+ .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+ .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
+ .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
+ .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT,
+ .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR,
+ .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT,
+ .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
+ .intclr_rise_mask = EXYNOS5260_TMU_RISE_INT_MASK,
+ .intclr_fall_mask = EXYNOS5260_TMU_FALL_INT_MASK,
+ .emul_con = EXYNOS5260_EMUL_CON,
+ .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+ .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
+ .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
+};
+
+#define __EXYNOS5260_TMU_DATA \
+ .threshold_falling = 10, \
+ .trigger_levels[0] = 85, \
+ .trigger_levels[1] = 103, \
+ .trigger_levels[2] = 110, \
+ .trigger_levels[3] = 120, \
+ .trigger_enable[0] = true, \
+ .trigger_enable[1] = true, \
+ .trigger_enable[2] = true, \
+ .trigger_enable[3] = false, \
+ .trigger_type[0] = THROTTLE_ACTIVE, \
+ .trigger_type[1] = THROTTLE_ACTIVE, \
+ .trigger_type[2] = SW_TRIP, \
+ .trigger_type[3] = HW_TRIP, \
+ .max_trigger_level = 4, \
+ .gain = 8, \
+ .reference_voltage = 16, \
+ .noise_cancel_mode = 4, \
+ .cal_type = TYPE_ONE_POINT_TRIMMING, \
+ .efuse_value = 55, \
+ .min_efuse_value = 40, \
+ .max_efuse_value = 100, \
+ .first_point_trim = 25, \
+ .second_point_trim = 85, \
+ .default_temp_offset = 50, \
+ .freq_tab[0] = { \
+ .freq_clip_max = 800 * 1000, \
+ .temp_level = 85, \
+ }, \
+ .freq_tab[1] = { \
+ .freq_clip_max = 200 * 1000, \
+ .temp_level = 103, \
+ }, \
+ .freq_tab_count = 2, \
+ .registers = &exynos5260_tmu_registers, \
+
+#define EXYNOS5260_TMU_DATA \
+ __EXYNOS5260_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5260, \
+ .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
+ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
+ TMU_SUPPORT_EMUL_TIME)
+
+struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
+ .tmu_data = {
+ { EXYNOS5260_TMU_DATA },
+ { EXYNOS5260_TMU_DATA },
+ { EXYNOS5260_TMU_DATA },
+ { EXYNOS5260_TMU_DATA },
+ { EXYNOS5260_TMU_DATA },
+ },
+ .tmu_count = 5,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5420)
+static const struct exynos_tmu_registers exynos5420_tmu_registers = {
+ .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+ .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+ .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+ .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+ .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+ .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+ .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+ .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+ .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+ .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+ .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+ .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+ .tmu_status = EXYNOS_TMU_REG_STATUS,
+ .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+ .threshold_th0 = EXYNOS_THD_TEMP_RISE,
+ .threshold_th1 = EXYNOS_THD_TEMP_FALL,
+ .tmu_inten = EXYNOS_TMU_REG_INTEN,
+ .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+ .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+ .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+ /* INTEN_RISE3 is not available on exynos5420 */
+ .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
+ .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
+ .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
+ .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+ .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT,
+ .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
+ .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
+ .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
+ .emul_con = EXYNOS_EMUL_CON,
+ .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+ .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
+ .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
+};
+
+#define __EXYNOS5420_TMU_DATA \
+ .threshold_falling = 10, \
+ .trigger_levels[0] = 85, \
+ .trigger_levels[1] = 103, \
+ .trigger_levels[2] = 110, \
+ .trigger_levels[3] = 120, \
+ .trigger_enable[0] = true, \
+ .trigger_enable[1] = true, \
+ .trigger_enable[2] = true, \
+ .trigger_enable[3] = false, \
+ .trigger_type[0] = THROTTLE_ACTIVE, \
+ .trigger_type[1] = THROTTLE_ACTIVE, \
+ .trigger_type[2] = SW_TRIP, \
+ .trigger_type[3] = HW_TRIP, \
+ .max_trigger_level = 4, \
+ .gain = 8, \
+ .reference_voltage = 16, \
+ .noise_cancel_mode = 4, \
+ .cal_type = TYPE_ONE_POINT_TRIMMING, \
+ .efuse_value = 55, \
+ .min_efuse_value = 40, \
+ .max_efuse_value = 100, \
+ .first_point_trim = 25, \
+ .second_point_trim = 85, \
+ .default_temp_offset = 50, \
+ .freq_tab[0] = { \
+ .freq_clip_max = 800 * 1000, \
+ .temp_level = 85, \
+ }, \
+ .freq_tab[1] = { \
+ .freq_clip_max = 200 * 1000, \
+ .temp_level = 103, \
+ }, \
+ .freq_tab_count = 2, \
+ .registers = &exynos5420_tmu_registers, \
+
+#define EXYNOS5420_TMU_DATA \
+ __EXYNOS5420_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5250, \
+ .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
+ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
+ TMU_SUPPORT_EMUL_TIME)
+
+#define EXYNOS5420_TMU_DATA_SHARED \
+ __EXYNOS5420_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \
+ .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
+ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
+ TMU_SUPPORT_EMUL_TIME | TMU_SUPPORT_ADDRESS_MULTIPLE)
+
+struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
+ .tmu_data = {
+ { EXYNOS5420_TMU_DATA },
+ { EXYNOS5420_TMU_DATA },
+ { EXYNOS5420_TMU_DATA_SHARED },
+ { EXYNOS5420_TMU_DATA_SHARED },
+ { EXYNOS5420_TMU_DATA_SHARED },
+ },
+ .tmu_count = 5,
+};
+#endif
+
#if defined(CONFIG_SOC_EXYNOS5440)
static const struct exynos_tmu_registers exynos5440_tmu_registers = {
.triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
@@ -217,10 +408,6 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
.threshold_th2 = EXYNOS5440_TMU_S0_7_TH2,
.threshold_th3_l0_shift = EXYNOS5440_TMU_TH_RISE4_SHIFT,
.tmu_inten = EXYNOS5440_TMU_S0_7_IRQEN,
- .inten_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK,
- .inten_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT,
- .inten_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK,
- .inten_fall_shift = EXYNOS5440_TMU_FALL_INT_SHIFT,
.inten_rise0_shift = EXYNOS5440_TMU_INTEN_RISE0_SHIFT,
.inten_rise1_shift = EXYNOS5440_TMU_INTEN_RISE1_SHIFT,
.inten_rise2_shift = EXYNOS5440_TMU_INTEN_RISE2_SHIFT,
@@ -228,6 +415,10 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
.inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
.tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
.tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ,
+ .intclr_fall_shift = EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT,
+ .intclr_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT,
+ .intclr_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK,
+ .intclr_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK,
.tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
.emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
.emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
@@ -255,7 +446,7 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
.type = SOC_ARCH_EXYNOS5440, \
.registers = &exynos5440_tmu_registers, \
.features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_MULTI_INST | TMU_SUPPORT_SHARED_MEMORY),
+ TMU_SUPPORT_MULTI_INST | TMU_SUPPORT_ADDRESS_MULTIPLE),
struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
.tmu_data = {
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index a1ea19d9e0a6..d268981b65e5 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -69,9 +69,11 @@
#define EXYNOS_TMU_RISE_INT_MASK 0x111
#define EXYNOS_TMU_RISE_INT_SHIFT 0
#define EXYNOS_TMU_FALL_INT_MASK 0x111
-#define EXYNOS_TMU_FALL_INT_SHIFT 12
#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
+#define EXYNOS_TMU_CLEAR_FALL_INT_SHIFT 12
+#define EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT 16
+#define EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT 4
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
@@ -85,6 +87,7 @@
#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
#define EXYNOS_TMU_INTEN_FALL1_SHIFT 20
#define EXYNOS_TMU_INTEN_FALL2_SHIFT 24
+#define EXYNOS_TMU_INTEN_FALL3_SHIFT 28
#define EXYNOS_EMUL_TIME 0x57F0
#define EXYNOS_EMUL_TIME_MASK 0xffff
@@ -95,6 +98,17 @@
#define EXYNOS_MAX_TRIGGER_PER_REG 4
+/* Exynos5260 specific */
+#define EXYNOS_TMU_REG_CONTROL1 0x24
+#define EXYNOS5260_TMU_REG_INTEN 0xC0
+#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
+#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
+#define EXYNOS5260_TMU_CLEAR_RISE_INT 0x1111
+#define EXYNOS5260_TMU_CLEAR_FALL_INT (0x1111 << 16)
+#define EXYNOS5260_TMU_RISE_INT_MASK 0x1111
+#define EXYNOS5260_TMU_FALL_INT_MASK 0x1111
+#define EXYNOS5260_EMUL_CON 0x100
+
/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE 6
#define EXYNOS4412_MUX_ADDR_SHIFT 20
@@ -119,7 +133,6 @@
#define EXYNOS5440_TMU_RISE_INT_MASK 0xf
#define EXYNOS5440_TMU_RISE_INT_SHIFT 0
#define EXYNOS5440_TMU_FALL_INT_MASK 0xf
-#define EXYNOS5440_TMU_FALL_INT_SHIFT 4
#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
@@ -156,6 +169,20 @@ extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
#define EXYNOS5250_TMU_DRV_DATA (NULL)
#endif
+#if defined(CONFIG_SOC_EXYNOS5260)
+extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
+#define EXYNOS5260_TMU_DRV_DATA (&exynos5260_default_tmu_data)
+#else
+#define EXYNOS5260_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5420)
+extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
+#define EXYNOS5420_TMU_DRV_DATA (&exynos5420_default_tmu_data)
+#else
+#define EXYNOS5420_TMU_DRV_DATA (NULL)
+#endif
+
#if defined(CONFIG_SOC_EXYNOS5440)
extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
#define EXYNOS5440_TMU_DRV_DATA (&exynos5440_default_tmu_data)
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index ab79ea4701d9..1e2193fc3241 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -113,10 +113,8 @@ static int spear_thermal_probe(struct platform_device *pdev)
}
stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
- if (!stdev) {
- dev_err(&pdev->dev, "kzalloc fail\n");
+ if (!stdev)
return -ENOMEM;
- }
/* Enable thermal sensor */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index fdb07199d9c2..1967bee4f076 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
return NULL;
}
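+/*
+ * A zone's critical trip temperature is usable only if the zone
+ * implements get_crit_temp() and that callback currently succeeds.
+ */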
+static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
+{
+ unsigned long temp;
+ return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
+}
+
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
struct thermal_hwmon_device *hwmon;
@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
if (result)
goto free_temp_mem;
- if (tz->ops->get_crit_temp) {
- unsigned long temperature;
- if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(temp->temp_crit.name,
- sizeof(temp->temp_crit.name),
+ if (thermal_zone_crit_temp_valid(tz)) {
+ snprintf(temp->temp_crit.name,
+ sizeof(temp->temp_crit.name),
"temp%d_crit", hwmon->count);
- temp->temp_crit.attr.attr.name = temp->temp_crit.name;
- temp->temp_crit.attr.attr.mode = 0444;
- temp->temp_crit.attr.show = temp_crit_show;
- sysfs_attr_init(&temp->temp_crit.attr.attr);
- result = device_create_file(hwmon->device,
- &temp->temp_crit.attr);
- if (result)
- goto unregister_input;
- }
+ temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+ temp->temp_crit.attr.attr.mode = 0444;
+ temp->temp_crit.attr.show = temp_crit_show;
+ sysfs_attr_init(&temp->temp_crit.attr.attr);
+ result = device_create_file(hwmon->device,
+ &temp->temp_crit.attr);
+ if (result)
+ goto unregister_input;
}
mutex_lock(&thermal_hwmon_list_lock);
@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
}
device_remove_file(hwmon->device, &temp->temp_input.attr);
- if (tz->ops->get_crit_temp)
+ if (thermal_zone_crit_temp_valid(tz))
device_remove_file(hwmon->device, &temp->temp_crit.attr);
mutex_lock(&thermal_hwmon_list_lock);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 3ab12ee359b7..634b6ce0e63a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
/* register shadow for context save and restore */
bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
bgp->conf->sensor_count, GFP_KERNEL);
- if (!bgp) {
+ if (!bgp->regval) {
dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
return ERR_PTR(-ENOMEM);
}
@@ -1248,7 +1248,7 @@ int ti_bandgap_probe(struct platform_device *pdev)
clk_rate = clk_round_rate(bgp->div_clk,
bgp->conf->sensors[0].ts_data->max_freq);
if (clk_rate < bgp->conf->sensors[0].ts_data->min_freq ||
- clk_rate == 0xffffffff) {
+ clk_rate <= 0) {
ret = -ENODEV;
dev_err(&pdev->dev, "wrong clock rate (%d)\n", clk_rate);
goto put_clks;
diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 147d49e95db2..df374860037c 100644
--- a/drivers/tty/hvc/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
@@ -196,7 +196,7 @@ static int __init hvc_tile_init(void)
#ifndef __tilegx__
struct hvc_struct *hp;
hp = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
- return IS_ERR(hp) ? PTR_ERR(hp) : 0;
+ return PTR_ERR_OR_ZERO(hp);
#else
platform_device_register(&hvc_tile_pdev);
return platform_driver_register(&hvc_tile_driver);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f95569dedc88..f44f1ba762c3 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1214,15 +1214,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IGNPAR(tty))
- return;
- if (I_PARMRK(tty)) {
- put_tty_queue('\377', ldata);
- put_tty_queue('\0', ldata);
- put_tty_queue(c, ldata);
- } else if (I_INPCK(tty))
- put_tty_queue('\0', ldata);
- else
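+ /*
+ * IGNPAR and PARMRK take effect only while input parity checking
+ * (INPCK) is enabled; with INPCK clear the character is passed
+ * through unchanged.
+ */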
+ if (I_INPCK(tty)) {
+ if (I_IGNPAR(tty))
+ return;
+ if (I_PARMRK(tty)) {
+ put_tty_queue('\377', ldata);
+ put_tty_queue('\0', ldata);
+ put_tty_queue(c, ldata);
+ } else
+ put_tty_queue('\0', ldata);
+ } else
put_tty_queue(c, ldata);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 27f7ad6b74c1..7a91c6d1eb7d 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2357,7 +2357,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
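+ /*
+ * Breaks must still be flagged in read_status_mask even when
+ * IGNBRK asks for them to be ignored; only then can
+ * ignore_status_mask discard them instead of the break leaking
+ * through as bogus data.
+ */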
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART_LSR_BI;
/*
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index cfef801a49d4..4858b8a99d3b 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -144,8 +144,11 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
if (!(device->port.membase || device->port.iobase))
return 0;
- if (!device->baud)
+ if (!device->baud) {
device->baud = probe_baud(&device->port);
+ snprintf(device->options, sizeof(device->options), "%u",
+ device->baud);
+ }
init_port(device);
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 501667e3e3f5..323376668b72 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * FIXME: port->read_status_mask and port->ignore_status_mask
+ * need to be initialized based on termios settings for
+ * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
+ */
}
static void altera_uart_rx_chars(struct altera_uart *pp)
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 01c9e72433e1..971af1e22d0f 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
uap->port.read_status_mask = UART01x_RSR_OE;
if (termios->c_iflag & INPCK)
uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
uap->port.read_status_mask |= UART01x_RSR_BE;
/*
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 908a6e3142a2..0e26dcbd5ea4 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1744,7 +1744,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask = UART011_DR_OE | 255;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART011_DR_BE;
/*
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index c9f5c9dcc15c..008c223eaf26 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart)
uart->port.icount.tx++;
uart->port.x_char = 0;
sent = 1;
- } else if (xmit->tail != xmit->head) { /* TODO: uart_circ_empty */
+ } else if (!uart_circ_empty(xmit)) {
ch = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx++;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3fceae099c44..c4f750314100 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1932,7 +1932,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= ATMEL_US_RXBRK;
if (atmel_use_pdc_rx(port))
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a47421e4627c..231519022b73 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -567,7 +567,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
port->read_status_mask |= UART_FIFO_PARERR_MASK;
}
- if (new->c_iflag & (BRKINT))
+ if (new->c_iflag & (IGNBRK | BRKINT))
port->read_status_mask |= UART_FIFO_BRKDET_MASK;
port->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 869ceba2ec57..ac86a20992e9 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask = OE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (FE | PE);
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= BI;
/*
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 2f2b2e538a54..cdbbc788230a 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
dport->port.read_status_mask = DZ_OERR;
if (termios->c_iflag & INPCK)
dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
dport->port.read_status_mask |= DZ_BREAK;
/* characters to ignore */
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 5131b5ee6164..a514ee6f5406 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -25,7 +25,7 @@
#include <asm/serial.h>
static struct console early_con = {
- .name = "earlycon",
+ .name = "uart", /* 8250 console switch requires this name */
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index b373f6416e8c..3b0ee9afd76f 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
if (new->c_iflag & INPCK)
port->read_status_mask |=
UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
- if (new->c_iflag & (BRKINT | PARMRK))
+ if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
port->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c5eb897de9de..49385c86cfba 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -902,7 +902,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
sport->port.read_status_mask |= UARTSR1_FE;
/* characters to ignore */
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e2f93874989b..044e86d528ae 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ if (uart_circ_empty(&port->state->xmit))
+ return;
+
if (USE_IRDA(sport)) {
/* half duplex in IrDA mode; have to disable receive mode */
temp = readl(sport->port.membase + UCR4);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 1d9420548e16..99b7b8697861 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port)
} else {
struct circ_buf *xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ return;
writeb(xmit->buf[xmit->tail], &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
@@ -850,7 +852,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
up->port.read_status_mask = Rx_OVR;
if (iflag & INPCK)
up->port.read_status_mask |= CRC_ERR | PAR_ERR;
- if (iflag & (BRKINT | PARMRK))
+ if (iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= BRK_ABRT;
up->port.ignore_status_mask = 0;
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 9cd9b4eba9fc..5702828fb62e 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port)
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- up->port.icount.tx++;
+ if (!uart_circ_empty(xmit)) {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ }
}
while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
#else
@@ -737,7 +739,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 2a99d0c61b9e..ba285cd45b59 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -835,7 +835,7 @@ static void max310x_set_termios(struct uart_port *port,
if (termios->c_iflag & INPCK)
port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
MAX310X_LSR_FRERR_BIT;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
/* Set status ignore mask */
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 0edfaf8cd269..a6f085717f94 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
mr1 |= MCFUART_MR1_PARITYNONE;
}
+ /*
+ * FIXME: port->read_status_mask and port->ignore_status_mask
+ * need to be initialized based on termios settings for
+ * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
+ */
+
if (termios->c_cflag & CSTOPB)
mr2 |= MCFUART_MR2_STOP2;
else
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 52c930fac210..445799dc9846 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -977,7 +977,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/* Characters to ignore */
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index e30a3ca3cea3..759c6a6fa74a 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
| SDMA_DESC_CMDSTAT_FR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
/* Characters/events to ignore */
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 778e376f197e..72000a6d5af0 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -582,7 +582,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART_SR_RX_BREAK;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -991,7 +991,7 @@ static const struct of_device_id msm_uartdm_table[] = {
{ }
};
-static int __init msm_serial_probe(struct platform_device *pdev)
+static int msm_serial_probe(struct platform_device *pdev)
{
struct msm_port *msm_port;
struct resource *resource;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4b5b3c2fe328..86de4477d98a 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -604,7 +604,7 @@ static void mxs_auart_settermios(struct uart_port *u,
if (termios->c_iflag & INPCK)
u->read_status_mask |= AUART_STAT_PERR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
u->read_status_mask |= AUART_STAT_BERR;
/*
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 0a4dd70d29eb..7a6745601d4e 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
}
port->read_status_mask = 0;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= SR_BE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= SR_PE | SR_FE;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index e9d420ff3931..f7ad5b903055 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port)
} else {
struct circ_buf *xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ goto out;
write_zsdata(uap, xmit->buf[xmit->tail]);
zssync(uap);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
}
+ out:
pmz_debug("pmz: start_tx() done.\n");
}
@@ -1092,7 +1095,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
uap->port.read_status_mask = Rx_OVR;
if (iflag & INPCK)
uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
- if (iflag & (BRKINT | PARMRK))
+ if (iflag & (IGNBRK | BRKINT | PARMRK))
uap->port.read_status_mask |= BRK_ABRT;
uap->port.ignore_status_mask = 0;
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index de6c05c63683..2ba24a45c97f 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.read_status_mask |=
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
sport->port.read_status_mask |=
ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 9e7ee39f8b2a..c638c53cd2b6 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 329337711bb0..c1d3ebdf3b97 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -66,7 +66,7 @@ static void dbg(const char *fmt, ...)
char buff[256];
va_start(va, fmt);
- vscnprintf(buff, sizeof(buf), fmt, va);
+ vscnprintf(buff, sizeof(buff), fmt, va);
va_end(va);
printascii(buff);
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index a7cdec2962dd..771f361c47ea 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
if (termios->c_iflag & INPCK)
uport->read_status_mask |= M_DUART_FRM_ERR |
M_DUART_PARITY_ERR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
uport->read_status_mask |= M_DUART_RCVD_BRK;
uport->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 5443b46345ed..e84b6a3bdd18 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -665,7 +665,7 @@ static void sccnxp_set_termios(struct uart_port *port,
port->read_status_mask = SR_OVR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= SR_PE | SR_FE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= SR_BRK;
/* Set status ignore mask */
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index e1caa99e3d3b..5c79bdab985d 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
port->read_status_mask = URLS_URROE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (URLS_URFE | URLS_URPE);
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= URLS_URBI;
/*
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 60f49b9d7e39..ea8546092c7e 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -697,7 +697,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= TXX9_SIDISR_UBRK;
/*
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 1f2be48c92ce..9b4d71cff00d 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -896,7 +896,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
if (termios->c_iflag & INPCK)
port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
}
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
if (termios->c_iflag & IGNPAR)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index c7f61ac27132..f48b1cc07eea 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
if (termios->c_iflag & INPCK)
ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
/*
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5faa8e905e98..2f57df9a71d9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port)
struct circ_buf *xmit = &up->port.state->xmit;
int i;
+ if (uart_circ_empty(xmit))
+ return;
+
up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
writeb(up->interrupt_mask1, &up->regs->w.imr1);
@@ -719,7 +722,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
if (iflag & INPCK)
up->port.read_status_mask |= (SAB82532_ISR0_PERR |
SAB82532_ISR0_FERR);
- if (iflag & (BRKINT | PARMRK))
+ if (iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
/*
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 9a0f24f83720..5326ae195e5f 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (iflag & (BRKINT | PARMRK))
+ if (iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index a2c40ed287d2..02df3940b95e 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port)
} else {
struct circ_buf *xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ return;
writeb(xmit->buf[xmit->tail], &channel->data);
ZSDELAY();
ZS_WSYNC(channel);
@@ -915,7 +917,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
up->port.read_status_mask = Rx_OVR;
if (iflag & INPCK)
up->port.read_status_mask |= CRC_ERR | PAR_ERR;
- if (iflag & (BRKINT | PARMRK))
+ if (iflag & (IGNBRK | BRKINT | PARMRK))
up->port.read_status_mask |= BRK_ABRT;
up->port.ignore_status_mask = 0;
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index d569ca58bab6..1c52074c38df 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -936,7 +936,7 @@ static void qe_uart_set_termios(struct uart_port *port,
port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
if (termios->c_iflag & INPCK)
port->read_status_mask |= BD_SC_FR | BD_SC_PR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= BD_SC_BR;
/*
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index a63c14bc9a24..db0c8a4ab03e 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
if (c_iflag & INPCK)
port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & (BRKINT | PARMRK))
+ if (c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART_LSR_BI;
port->ignore_status_mask = 0;
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 6a169877109b..2b65bb7ffb8a 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
uport->read_status_mask = Rx_OVR;
if (termios->c_iflag & INPCK)
uport->read_status_mask |= FRM_ERR | PAR_ERR;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
uport->read_status_mask |= Rx_BRK;
uport->ignore_status_mask = 0;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5e0f6ff2e2f5..b33b00b386de 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3226,8 +3226,7 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_back = &registered_con_driver[i];
- if (con_back->con &&
- !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
+ if (con_back->con && con_back->con != csw) {
defcsw = con_back->con;
retval = 0;
break;
@@ -3332,6 +3331,7 @@ static int vt_unbind(struct con_driver *con)
{
const struct consw *csw = NULL;
int i, more = 1, first = -1, last = -1, deflt = 0;
+ int ret;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
con_is_graphics(con->con, con->first, con->last))
@@ -3357,8 +3357,10 @@ static int vt_unbind(struct con_driver *con)
if (first != -1) {
console_lock();
- do_unbind_con_driver(csw, first, last, deflt);
+ ret = do_unbind_con_driver(csw, first, last, deflt);
console_unlock();
+ if (ret != 0)
+ return ret;
}
first = -1;
@@ -3645,17 +3647,20 @@ err:
*/
int do_unregister_con_driver(const struct consw *csw)
{
- int i, retval = -ENODEV;
+ int i;
/* cannot unregister a bound driver */
if (con_is_bound(csw))
- goto err;
+ return -EBUSY;
+
+ if (csw == conswitchp)
+ return -EINVAL;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con_driver = &registered_con_driver[i];
if (con_driver->con == csw &&
- con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+ con_driver->flag & CON_DRIVER_FLAG_INIT) {
vtconsole_deinit_device(con_driver);
device_destroy(vtconsole_class,
MKDEV(0, con_driver->node));
@@ -3666,12 +3671,11 @@ int do_unregister_con_driver(const struct consw *csw)
con_driver->flag = 0;
con_driver->first = 0;
con_driver->last = 0;
- retval = 0;
- break;
+ return 0;
}
}
-err:
- return retval;
+
+ return -ENODEV;
}
EXPORT_SYMBOL_GPL(do_unregister_con_driver);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index e371f5af11f5..a673e5b6a2e0 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -655,7 +655,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
if (mem->addr & ~PAGE_MASK)
return -ENODEV;
- if (vma->vm_end - vma->vm_start > PAGE_ALIGN(mem->size))
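+ /* Reject mappings that extend past the region's actual size. */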
+ if (vma->vm_end - vma->vm_start > mem->size)
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 69425b3cb6b7..9d2b673f90e3 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1321,6 +1321,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
unsigned long flags;
+ struct td_node *node, *tmpnode;
if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
@@ -1331,6 +1332,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
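+ /* Free all transfer descriptors still queued for this request. */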
+ list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+ dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+ list_del(&node->td);
+ kfree(node);
+ }
+
/* pop request */
list_del_init(&hwreq->queue);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 879b66e13370..21b99b4b4082 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1526,18 +1526,6 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
- /* Update the HCD's internal representation of this hub before khubd
- * starts getting port status changes for devices under the hub.
- */
- if (hcd->driver->update_hub_device) {
- ret = hcd->driver->update_hub_device(hcd, hdev,
- &hub->tt, GFP_KERNEL);
- if (ret < 0) {
- message = "can't update HCD hub info";
- goto fail;
- }
- }
-
ret = hub_hub_status(hub, &hubstatus, &hubchange);
if (ret < 0) {
message = "can't get hub status";
@@ -1589,10 +1577,28 @@ static int hub_configure(struct usb_hub *hub,
}
}
hdev->maxchild = i;
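+ /*
+ * Drop the runtime-pm reference that usb_hub_create_port_device()
+ * took on each port now that the hub is fully set up.
+ */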
+ for (i = 0; i < hdev->maxchild; i++) {
+ struct usb_port *port_dev = hub->ports[i];
+
+ pm_runtime_put(&port_dev->dev);
+ }
+
mutex_unlock(&usb_port_peer_mutex);
if (ret < 0)
goto fail;
+ /* Update the HCD's internal representation of this hub before khubd
+ * starts getting port status changes for devices under the hub.
+ */
+ if (hcd->driver->update_hub_device) {
+ ret = hcd->driver->update_hub_device(hcd, hdev,
+ &hub->tt, GFP_KERNEL);
+ if (ret < 0) {
+ message = "can't update HCD hub info";
+ goto fail;
+ }
+ }
+
usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
hub_activate(hub, HUB_INIT);
@@ -3458,7 +3464,8 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
struct usb_device *udev = port_dev->child;
if (udev && udev->can_submit) {
- dev_warn(&port_dev->dev, "not suspended yet\n");
+ dev_warn(&port_dev->dev, "device %s not suspended yet\n",
+ dev_name(&udev->dev));
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 0a7cdc0ef0a9..326308e53961 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -84,6 +84,7 @@ struct usb_hub {
* @dev: generic device interface
* @port_owner: port's owner
* @peer: related usb2 and usb3 ports (share the same connector)
+ * @req: default pm qos request for hubs without port power control
* @connect_type: port's connect type
* @location: opaque representation of platform connector location
* @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
@@ -95,6 +96,7 @@ struct usb_port {
struct device dev;
struct usb_dev_state *port_owner;
struct usb_port *peer;
+ struct dev_pm_qos_request *req;
enum usb_port_connect_type connect_type;
usb_port_location_t location;
struct mutex status_lock;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 62036faf56c0..fe1b6d0967e3 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -21,6 +21,8 @@
#include "hub.h"
+static int usb_port_block_power_off;
+
static const struct attribute_group *port_dev_group[];
static ssize_t connect_type_show(struct device *dev,
@@ -66,6 +68,7 @@ static void usb_port_device_release(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
+ kfree(port_dev->req);
kfree(port_dev);
}
@@ -142,6 +145,9 @@ static int usb_port_runtime_suspend(struct device *dev)
== PM_QOS_FLAGS_ALL)
return -EAGAIN;
+ if (usb_port_block_power_off)
+ return -EBUSY;
+
usb_autopm_get_interface(intf);
retval = usb_hub_set_port_power(hdev, hub, port1, false);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
@@ -190,11 +196,19 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
if (left->peer || right->peer) {
struct usb_port *lpeer = left->peer;
struct usb_port *rpeer = right->peer;
-
- WARN(1, "failed to peer %s and %s (%s -> %p) (%s -> %p)\n",
- dev_name(&left->dev), dev_name(&right->dev),
- dev_name(&left->dev), lpeer,
- dev_name(&right->dev), rpeer);
+ char *method;
+
+ if (left->location && left->location == right->location)
+ method = "location";
+ else
+ method = "default";
+
+ pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
+ dev_name(&left->dev), dev_name(&right->dev), method,
+ dev_name(&left->dev),
+ lpeer ? dev_name(&lpeer->dev) : "none",
+ dev_name(&right->dev),
+ rpeer ? dev_name(&rpeer->dev) : "none");
return -EBUSY;
}
@@ -251,6 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
dev_warn(&left->dev, "failed to peer to %s (%d)\n",
dev_name(&right->dev), rc);
pr_warn_once("usb: port power management may be unreliable\n");
+ usb_port_block_power_off = 1;
}
}
@@ -386,9 +401,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
int retval;
port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
- if (!port_dev) {
- retval = -ENOMEM;
- goto exit;
+ if (!port_dev)
+ return -ENOMEM;
+
+ port_dev->req = kzalloc(sizeof(*(port_dev->req)), GFP_KERNEL);
+ if (!port_dev->req) {
+ kfree(port_dev);
+ return -ENOMEM;
}
hub->ports[port1 - 1] = port_dev;
@@ -404,31 +423,53 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
port1);
mutex_init(&port_dev->status_lock);
retval = device_register(&port_dev->dev);
- if (retval)
- goto error_register;
+ if (retval) {
+ put_device(&port_dev->dev);
+ return retval;
+ }
+
+ /* Set the default policy: port power-off disabled. */
+ retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
+ DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
+ if (retval < 0) {
+ device_unregister(&port_dev->dev);
+ return retval;
+ }
find_and_link_peer(hub, port1);
+ /*
+ * Enable runtime pm and hold a reference that hub_configure()
+ * will drop once the PM_QOS_NO_POWER_OFF flag state has been set
+ * and the hub has been fully registered (hdev->maxchild set).
+ */
pm_runtime_set_active(&port_dev->dev);
+ pm_runtime_get_noresume(&port_dev->dev);
+ pm_runtime_enable(&port_dev->dev);
+ device_enable_async_suspend(&port_dev->dev);
/*
- * Do not enable port runtime pm if the hub does not support
- * power switching. Also, userspace must have final say of
- * whether a port is permitted to power-off. Do not enable
- * runtime pm if we fail to expose pm_qos_no_power_off.
+ * Do not expose the port-poweroff capability if the hub
+ * does not support power switching.
*/
- if (hub_is_port_power_switchable(hub)
- && dev_pm_qos_expose_flags(&port_dev->dev,
- PM_QOS_FLAG_NO_POWER_OFF) == 0)
- pm_runtime_enable(&port_dev->dev);
+ if (!hub_is_port_power_switchable(hub))
+ return 0;
- device_enable_async_suspend(&port_dev->dev);
- return 0;
+ /* Attempt to let userspace take over the policy. */
+ retval = dev_pm_qos_expose_flags(&port_dev->dev,
+ PM_QOS_FLAG_NO_POWER_OFF);
+ if (retval < 0) {
+ dev_warn(&port_dev->dev, "failed to expose pm_qos_no_poweroff\n");
+ return 0;
+ }
-error_register:
- put_device(&port_dev->dev);
-exit:
- return retval;
+ /* Userspace owns the policy, drop the kernel 'no_poweroff' request. */
+ retval = dev_pm_qos_remove_request(port_dev->req);
+ if (retval >= 0) {
+ kfree(port_dev->req);
+ port_dev->req = NULL;
+ }
+ return 0;
}
void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 8eb996e4f058..261c3b428220 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -45,6 +45,7 @@ comment "Platform Glue Driver Support"
config USB_DWC3_OMAP
tristate "Texas Instruments OMAP5 and similar Platforms"
depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST)
+ depends on OF
default USB_DWC3
help
Some platforms from Texas Instruments like OMAP5, DRA7xxx and
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 4af4c3567656..07a736acd0f2 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -322,7 +322,7 @@ static int dwc3_omap_remove_core(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
- platform_device_unregister(pdev);
+ of_device_unregister(pdev);
return 0;
}
@@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
- dwc3_omap_disable_irqs(omap);
+ dwc3_omap_write_irqmisc_set(omap, 0x00);
return 0;
}
@@ -607,8 +607,19 @@ static int dwc3_omap_prepare(struct device *dev)
static void dwc3_omap_complete(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
+ u32 reg;
- dwc3_omap_enable_irqs(omap);
+ reg = (USBOTGSS_IRQMISC_OEVT |
+ USBOTGSS_IRQMISC_DRVVBUS_RISE |
+ USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+ USBOTGSS_IRQMISC_IDPULLUP_RISE |
+ USBOTGSS_IRQMISC_DRVVBUS_FALL |
+ USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+ USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+ dwc3_omap_write_irqmisc_set(omap, reg);
}
static int dwc3_omap_suspend(struct device *dev)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9d64dd02c57e..dab7927d1009 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -828,10 +828,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
length, last ? " last" : "",
chain ? " chain" : "");
- /* Skip the LINK-TRB on ISOC */
- if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->endpoint.desc))
- dep->free_slot++;
trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
@@ -843,6 +839,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
}
dep->free_slot++;
+ /* Skip the LINK-TRB on ISOC */
+ if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ dep->free_slot++;
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 2ddcd635ca2a..97142146eead 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1145,15 +1145,15 @@ static struct configfs_item_operations interf_item_ops = {
.store_attribute = usb_os_desc_attr_store,
};
-static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc,
- char *page)
+static ssize_t interf_grp_compatible_id_show(struct usb_os_desc *desc,
+ char *page)
{
memcpy(page, desc->ext_compat_id, 8);
return 8;
}
-static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
- const char *page, size_t len)
+static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
+ const char *page, size_t len)
{
int l;
@@ -1171,20 +1171,20 @@ static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
return len;
}
-static struct usb_os_desc_attribute rndis_grp_attr_compatible_id =
+static struct usb_os_desc_attribute interf_grp_attr_compatible_id =
__CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR,
- rndis_grp_compatible_id_show,
- rndis_grp_compatible_id_store);
+ interf_grp_compatible_id_show,
+ interf_grp_compatible_id_store);
-static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc,
- char *page)
+static ssize_t interf_grp_sub_compatible_id_show(struct usb_os_desc *desc,
+ char *page)
{
memcpy(page, desc->ext_compat_id + 8, 8);
return 8;
}
-static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
- const char *page, size_t len)
+static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
+ const char *page, size_t len)
{
int l;
@@ -1202,20 +1202,21 @@ static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
return len;
}
-static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id =
+static struct usb_os_desc_attribute interf_grp_attr_sub_compatible_id =
__CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR,
- rndis_grp_sub_compatible_id_show,
- rndis_grp_sub_compatible_id_store);
+ interf_grp_sub_compatible_id_show,
+ interf_grp_sub_compatible_id_store);
static struct configfs_attribute *interf_grp_attrs[] = {
- &rndis_grp_attr_compatible_id.attr,
- &rndis_grp_attr_sub_compatible_id.attr,
+ &interf_grp_attr_compatible_id.attr,
+ &interf_grp_attr_sub_compatible_id.attr,
NULL
};
int usb_os_desc_prepare_interf_dir(struct config_group *parent,
int n_interf,
struct usb_os_desc **desc,
+ char **names,
struct module *owner)
{
struct config_group **f_default_groups, *os_desc_group,
@@ -1257,8 +1258,8 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
d = desc[n_interf];
d->owner = owner;
config_group_init_type_name(&d->group, "", interface_type);
- config_item_set_name(&d->group.cg_item, "interface.%d",
- n_interf);
+ config_item_set_name(&d->group.cg_item, "interface.%s",
+ names[n_interf]);
interface_groups[n_interf] = &d->group;
}
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index a14ac792c698..36c468c4f5e9 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -8,6 +8,7 @@ void unregister_gadget_item(struct config_item *item);
int usb_os_desc_prepare_interf_dir(struct config_group *parent,
int n_interf,
struct usb_os_desc **desc,
+ char **names,
struct module *owner);
static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 74202d67f911..8598c27c7d43 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1483,11 +1483,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
ffs->ep0req->context = ffs;
lang = ffs->stringtabs;
- for (lang = ffs->stringtabs; *lang; ++lang) {
- struct usb_string *str = (*lang)->strings;
- int id = first_id;
- for (; str->s; ++id, ++str)
- str->id = id;
+ if (lang) {
+ for (; *lang; ++lang) {
+ struct usb_string *str = (*lang)->strings;
+ int id = first_id;
+ for (; str->s; ++id, ++str)
+ str->id = id;
+ }
}
ffs->gadget = cdev->gadget;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index eed3ad878047..9c41e9515b8e 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -687,7 +687,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
GFP_KERNEL);
if (!f->os_desc_table)
- return PTR_ERR(f->os_desc_table);
+ return -ENOMEM;
f->os_desc_n = 1;
f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
}
@@ -905,6 +905,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
{
struct f_rndis_opts *opts;
struct usb_os_desc *descs[1];
+ char *names[1];
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -922,8 +923,9 @@ static struct usb_function_instance *rndis_alloc_inst(void)
INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop);
descs[0] = &opts->rndis_os_desc;
+ names[0] = "rndis";
usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
- THIS_MODULE);
+ names, THIS_MODULE);
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c
index 99a37ed03e27..c7004ee89c90 100644
--- a/drivers/usb/gadget/gr_udc.c
+++ b/drivers/usb/gadget/gr_udc.c
@@ -1532,8 +1532,9 @@ static int gr_ep_enable(struct usb_ep *_ep,
"%s mode: multiple trans./microframe not valid\n",
(mode == 2 ? "Bulk" : "Control"));
return -EINVAL;
- } else if (nt == 0x11) {
- dev_err(dev->dev, "Invalid value for trans./microframe\n");
+ } else if (nt == 0x3) {
+ dev_err(dev->dev,
+ "Invalid value 0x3 for additional trans./microframe\n");
return -EINVAL;
} else if ((nt + 1) * max > buffer_size) {
dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index ee6c16416c30..2e4ce7704908 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1264,8 +1264,13 @@ dev_release (struct inode *inode, struct file *fd)
kfree (dev->buf);
dev->buf = NULL;
- put_dev (dev);
+ /* other endpoints were all decoupled from this device */
+ spin_lock_irq(&dev->lock);
+ dev->state = STATE_DEV_DISABLED;
+ spin_unlock_irq(&dev->lock);
+
+ put_dev (dev);
return 0;
}
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index ff205a7bc55c..648f9e489b39 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -220,11 +220,11 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
* If we can't read the file, it's no good.
* If we can't write the file, use it read-only.
*/
- if (!(filp->f_op->read || filp->f_op->aio_read)) {
+ if (!(filp->f_mode & FMODE_CAN_READ)) {
LINFO(curlun, "file not readable: %s\n", filename);
goto out;
}
- if (!(filp->f_op->write || filp->f_op->aio_write))
+ if (!(filp->f_mode & FMODE_CAN_WRITE))
ro = 1;
size = i_size_read(inode->i_mapping->host);
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index fe0880d0873e..97b027724ee7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -793,7 +793,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
net->netdev_ops = &eth_netdev_ops;
- SET_ETHTOOL_OPS(net, &ops);
+ net->ethtool_ops = &ops;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -850,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname)
net->netdev_ops = &eth_netdev_ops;
- SET_ETHTOOL_OPS(net, &ops);
+ net->ethtool_ops = &ops;
SET_NETDEV_DEVTYPE(net, &gadget_type);
return net;
@@ -1120,7 +1120,10 @@ void gether_disconnect(struct gether *link)
DBG(dev, "%s\n", __func__);
+ netif_tx_lock(dev->net);
netif_stop_queue(dev->net);
+ netif_tx_unlock(dev->net);
+
netif_carrier_off(dev->net);
/* disable endpoints, forcing (synchronous) completion
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 61b7817bd66b..03314f861bee 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -176,7 +176,7 @@ config USB_EHCI_HCD_AT91
config USB_EHCI_MSM
tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
- depends on ARCH_MSM
+ depends on ARCH_MSM || ARCH_QCOM
select USB_EHCI_ROOT_HUB_TT
---help---
Enables support for the USB Host controller present on the
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index af3974a5e7c2..7d75465d97c7 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -68,9 +68,6 @@ static void usb_hcd_tdi_set_mode(struct ehci_hcd *ehci)
/* set TWI GPIO USB_HOST_DEV pin high */
gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1);
-#ifdef CONFIG_MSP_HAS_DUAL_USB
- gpio_direction_output(MSP_PIN_USB1_HOST_DEV, 1);
-#endif
}
/* called during probe() after chip reset completes */
@@ -248,33 +245,6 @@ void usb_hcd_msp_remove(struct usb_hcd *hcd, struct platform_device *dev)
usb_put_hcd(hcd);
}
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-/*
- * Wrapper around the main ehci_irq. Since both USB host controllers are
- * sharing the same IRQ, need to first determine whether we're the intended
- * recipient of this interrupt.
- */
-static irqreturn_t ehci_msp_irq(struct usb_hcd *hcd)
-{
- u32 int_src;
- struct device *dev = hcd->self.controller;
- struct platform_device *pdev;
- struct mspusb_device *mdev;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- /* need to reverse-map a couple of containers to get our device */
- pdev = to_platform_device(dev);
- mdev = to_mspusb_device(pdev);
-
- /* Check to see if this interrupt is for this host controller */
- int_src = ehci_readl(ehci, &mdev->mab_regs->int_stat);
- if (int_src & (1 << pdev->id))
- return ehci_irq(hcd);
-
- /* Not for this device */
- return IRQ_NONE;
-}
-#endif /* DUAL_USB */
-
static const struct hc_driver ehci_msp_hc_driver = {
.description = hcd_name,
.product_desc = "PMC MSP EHCI",
@@ -283,11 +253,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
/*
* generic hardware linkage
*/
-#ifdef CONFIG_MSP_HAS_DUAL_USB
- .irq = ehci_msp_irq,
-#else
.irq = ehci_irq,
-#endif
.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
@@ -334,9 +300,6 @@ static int ehci_hcd_msp_drv_probe(struct platform_device *pdev)
return -ENODEV;
gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO");
-#ifdef CONFIG_MSP_HAS_DUAL_USB
- gpio_request(MSP_PIN_USB1_HOST_DEV, "USB1_HOST_DEV_GPIO");
-#endif
ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev);
@@ -351,9 +314,6 @@ static int ehci_hcd_msp_drv_remove(struct platform_device *pdev)
/* free TWI GPIO USB_HOST_DEV pin */
gpio_free(MSP_PIN_USB0_HOST_DEV);
-#ifdef CONFIG_MSP_HAS_DUAL_USB
- gpio_free(MSP_PIN_USB1_HOST_DEV);
-#endif
return 0;
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 4a6d3dd68572..2f3acebb577a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
},
},
+ {
+ /* HASEE E200 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+ DMI_MATCH(DMI_BOARD_NAME, "E210"),
+ DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+ },
+ },
{ }
};
@@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
{
int try_handoff = 1, tried_handoff = 0;
- /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
- * the handoff on its unused controller. Skip it. */
- if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
+ /*
+ * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+ * the handoff on its unused controller. Skip it.
+ *
+ * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
+ */
+ if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+ pdev->device == 0x27cc)) {
if (dmi_check_system(ehci_dmi_nohandoff_table))
try_handoff = 0;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 6231ce6aa0c3..aa79e8749040 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
+#include <linux/device.h>
#include <asm/unaligned.h>
#include "xhci.h"
@@ -287,7 +288,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
struct xhci_command *command;
command = xhci_alloc_command(xhci, false, false,
- GFP_NOIO);
+ GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cmd);
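The GFP change just above is a correctness fix, not tuning: xhci_stop_device() reaches this allocation while holding xhci->lock with interrupts disabled (note the spin_unlock_irqrestore() in the error path), and GFP_NOIO may still sleep waiting for reclaim, whereas GFP_NOWAIT fails immediately. The general rule, sketched with illustrative names (not part of the patch):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* sketch: allocating while holding a spinlock with IRQs off */
    static int example_alloc_under_lock(spinlock_t *lock)
    {
    	unsigned long flags;
    	void *obj;

    	spin_lock_irqsave(lock, flags);
    	/* may not sleep here: GFP_KERNEL/GFP_NOIO can block on reclaim,
    	 * GFP_NOWAIT returns NULL fast instead */
    	obj = kmalloc(64, GFP_NOWAIT);
    	spin_unlock_irqrestore(lock, flags);

    	if (!obj)
    		return -ENOMEM;
    	kfree(obj);
    	return 0;
    }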
@@ -1139,7 +1140,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
* including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
* is enabled, so also enable remote wake here.
*/
- if (hcd->self.root_hub->do_remote_wakeup) {
+ if (hcd->self.root_hub->do_remote_wakeup
+ && device_may_wakeup(hcd->self.controller)) {
+
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d67ff71209f5..749fc68eb5c1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1433,8 +1433,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
case TRB_RESET_DEV:
- WARN_ON(slot_id != TRB_TO_SLOT_ID(
- le32_to_cpu(cmd_trb->generic.field[3])));
+ /* SLOT_ID field in reset device cmd completion event TRB is 0.
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
+ */
+ slot_id = TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3]));
xhci_handle_cmd_reset_dev(xhci, slot_id, event);
break;
case TRB_NEC_GET_FW:
@@ -3534,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
- return roundup(total_packet_count, max_burst + 1) - 1;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
/*
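The one-line DIV_ROUND_UP() fix above is easy to misread: roundup() rounds a quantity up to the next multiple of the burst size, while DIV_ROUND_UP() counts how many whole bursts are needed, and only the latter minus one is a valid burst count. A standalone illustration (plain userspace C, not part of the patch):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
    #define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
    	unsigned total = 3, burst = 2;	/* 3 packets, bMaxBurst = 1 */

    	/* rounds 3 packets up to 4, minus 1: still a packet count */
    	printf("roundup:      %u\n", roundup(total, burst) - 1);	/* 3 */
    	/* 3 packets fit in 2 bursts; minus 1 = number of extra bursts */
    	printf("DIV_ROUND_UP: %u\n", DIV_ROUND_UP(total, burst) - 1);	/* 1 */
    	return 0;
    }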
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2b8d9a24af09..7436d5f5e67a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -936,7 +936,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
*/
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
- u32 command, temp = 0;
+ u32 command, temp = 0, status;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
int retval = 0;
@@ -1054,8 +1054,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
done:
if (retval == 0) {
- usb_hcd_resume_root_hub(hcd);
- usb_hcd_resume_root_hub(xhci->shared_hcd);
+ /* Resume root hubs only when there are pending events. */
+ status = readl(&xhci->op_regs->status);
+ if (status & STS_EINT) {
+ usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
}
/*
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 51a6da256772..829f446064ea 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -7,7 +7,7 @@
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>
-
+#include <linux/timer.h>
#include <linux/usb.h>
#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
@@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
return sg;
}
+static void sg_timeout(unsigned long _req)
+{
+ struct usb_sg_request *req = (struct usb_sg_request *) _req;
+
+ req->status = -ETIMEDOUT;
+ usb_sg_cancel(req);
+}
+
static int perform_sglist(
struct usbtest_dev *tdev,
unsigned iterations,
@@ -495,6 +503,9 @@ static int perform_sglist(
{
struct usb_device *udev = testdev_to_usbdev(tdev);
int retval = 0;
+ struct timer_list sg_timer;
+
+ setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
while (retval == 0 && iterations-- > 0) {
retval = usb_sg_init(req, udev, pipe,
@@ -505,7 +516,10 @@ static int perform_sglist(
if (retval)
break;
+ mod_timer(&sg_timer, jiffies +
+ msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
+ del_timer_sync(&sg_timer);
retval = req->status;
/* FIXME check resulting data pattern */
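The timer added above is the standard guard for an uninterruptible wait: arm an on-stack timer before blocking, let its callback cancel the operation on timeout, and always del_timer_sync() before the stack frame goes away. The same pattern in isolation (hypothetical request type; the usb_sg_* calls are elided):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>

    /* hypothetical stand-in for struct usb_sg_request */
    struct my_req {
    	int status;
    };

    /* old-style (pre-4.15) timer callback: data is a cast pointer */
    static void my_timeout(unsigned long data)
    {
    	struct my_req *req = (struct my_req *)data;

    	req->status = -ETIMEDOUT;
    	/* cancel the in-flight operation here, e.g. usb_sg_cancel() */
    }

    static int wait_with_timeout(struct my_req *req, unsigned int timeout_ms)
    {
    	struct timer_list t;

    	setup_timer_on_stack(&t, my_timeout, (unsigned long)req);
    	mod_timer(&t, jiffies + msecs_to_jiffies(timeout_ms));
    	/* ... block here, e.g. usb_sg_wait(req) ... */
    	del_timer_sync(&t);	/* never return with an armed on-stack timer */
    	destroy_timer_on_stack(&t);
    	return req->status;
    }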
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
index d2353781bd2d..1e58ed2361cc 100644
--- a/drivers/usb/musb/musb_am335x.c
+++ b/drivers/usb/musb/musb_am335x.c
@@ -19,21 +19,6 @@ err:
return ret;
}
-static int of_remove_populated_child(struct device *dev, void *d)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- of_device_unregister(pdev);
- return 0;
-}
-
-static int am335x_child_remove(struct platform_device *pdev)
-{
- device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
static const struct of_device_id am335x_child_of_match[] = {
{ .compatible = "ti,am33xx-usb" },
{ },
@@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
static struct platform_driver am335x_child_driver = {
.probe = am335x_child_probe,
- .remove = am335x_child_remove,
.driver = {
.name = "am335x-usb-childs",
.of_match_table = am335x_child_of_match,
},
};
-module_platform_driver(am335x_child_driver);
+static int __init am335x_child_init(void)
+{
+ return platform_driver_register(&am335x_child_driver);
+}
+module_init(am335x_child_init);
+
MODULE_DESCRIPTION("AM33xx child devices");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 61da471b7aed..eff3c5cf84f4 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -849,7 +849,7 @@ b_host:
}
/* handle babble condition */
- if (int_usb & MUSB_INTR_BABBLE)
+ if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb))
schedule_work(&musb->recover_work);
#if 0
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 7b8bbf53127e..5341bb223b7c 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -318,7 +318,7 @@ static void cppi41_dma_callback(void *private_data)
}
list_add_tail(&cppi41_channel->tx_check,
&controller->early_tx_list);
- if (!hrtimer_active(&controller->early_tx)) {
+ if (!hrtimer_is_queued(&controller->early_tx)) {
hrtimer_start_range_ns(&controller->early_tx,
ktime_set(0, 140 * NSEC_PER_USEC),
40 * NSEC_PER_USEC,
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 51beb13c7e1a..09529f94e72d 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -494,10 +494,9 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *ctrl_base = musb->ctrl_base;
- void __iomem *base = musb->mregs;
u32 reg;
- reg = dsps_readl(base, wrp->mode);
+ reg = dsps_readl(ctrl_base, wrp->mode);
switch (mode) {
case MUSB_HOST:
@@ -510,7 +509,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
*/
reg |= (1 << wrp->iddig_mux);
- dsps_writel(base, wrp->mode, reg);
+ dsps_writel(ctrl_base, wrp->mode, reg);
dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
case MUSB_PERIPHERAL:
@@ -523,10 +522,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
*/
reg |= (1 << wrp->iddig_mux);
- dsps_writel(base, wrp->mode, reg);
+ dsps_writel(ctrl_base, wrp->mode, reg);
break;
case MUSB_OTG:
- dsps_writel(base, wrp->phy_utmi, 0x02);
+ dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
default:
dev_err(glue->dev, "unsupported mode %d\n", mode);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index c2e45e632723..f202e5088461 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
- musb->dev.of_node = pdev->dev.of_node;
glue->dev = &pdev->dev;
glue->musb = musb;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index ced34f39bdd4..c929370cdaa6 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1229,7 +1229,9 @@ static void msm_otg_sm_work(struct work_struct *w)
motg->chg_state = USB_CHG_STATE_UNDEFINED;
motg->chg_type = USB_INVALID_CHARGER;
}
- pm_runtime_put_sync(otg->phy->dev);
+
+ if (otg->phy->state == OTG_STATE_B_IDLE)
+ pm_runtime_put_sync(otg->phy->dev);
break;
case OTG_STATE_B_PERIPHERAL:
dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index d49f9c326035..4fd36530bfa3 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -681,6 +681,14 @@ usbhs_fifo_read_end:
usbhs_pipe_number(pipe),
pkt->length, pkt->actual, *is_done, pkt->zero);
+ /*
+ * Transmission end
+ */
+ if (*is_done) {
+ if (usbhs_pipe_is_dcp(pipe))
+ usbhs_dcp_control_transfer_done(pipe);
+ }
+
usbhs_fifo_read_busy:
usbhsf_fifo_unselect(pipe, fifo);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 762e4a5f5ae9..330df5ce435b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index edf3b124583c..8a3813be1b28 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
- { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+ { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
+ { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
@@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+ /* Infineon Devices */
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
{ } /* Terminating entry */
};
@@ -1566,14 +1569,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
struct usb_device *udev = serial->dev;
struct usb_interface *interface = serial->interface;
- struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+ struct usb_endpoint_descriptor *ep_desc;
unsigned num_endpoints;
- int i;
+ unsigned i;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+ if (!num_endpoints)
+ return;
+
/* NOTE: some customers have programmed FT232R/FT245R devices
* with an endpoint size of 0 - not good. In this case, we
* want to override the endpoint descriptor setting and use a
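The reordering above matters because the removed initializer dereferenced endpoint[1] before anything had checked bNumEndpoints, so a broken or malicious device reporting zero endpoints would read past the descriptor array. A sketch of the safe ordering (checking for two endpoints, slightly stricter than the minimal guard the patch adds):

    #include <linux/usb.h>

    static void set_max_packet_size_safely(struct usb_interface *interface)
    {
    	struct usb_host_interface *alt = interface->cur_altsetting;
    	struct usb_endpoint_descriptor *ep_desc;

    	if (alt->desc.bNumEndpoints < 2)
    		return;			/* endpoint[1] would be out of bounds */
    	ep_desc = &alt->endpoint[1].desc;	/* safe only after the check */
    	/* ... inspect ep_desc->wMaxPacketSize ... */
    	(void)ep_desc;
    }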
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 500474c48f4b..c4777bc6aee0 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -584,6 +584,12 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Infineon Technologies
+ */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
@@ -798,7 +804,8 @@
* Submitted by Colin Leroy
*/
#define TESTO_VID 0x128D
-#define TESTO_USB_INTERFACE_PID 0x0001
+#define TESTO_1_PID 0x0001
+#define TESTO_3_PID 0x0003
/*
* Mobility Electronics products.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 59c3108cc136..a9688940543d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
/* Zoom */
#define ZOOM_PRODUCT_4597 0x9607
+/* SpeedUp SU9800 usb 3g modem */
+#define SPEEDUP_PRODUCT_SU9800 0x9800
+
/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+#define OLIVETTI_PRODUCT_OLICARD120 0xc001
+#define OLIVETTI_PRODUCT_OLICARD140 0xc002
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
+#define OLIVETTI_PRODUCT_OLICARD155 0xc004
#define OLIVETTI_PRODUCT_OLICARD200 0xc005
+#define OLIVETTI_PRODUCT_OLICARD160 0xc00a
#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
/* Celot products */
@@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.driver_info = (kernel_ulong_t)&four_g_w14_blacklist
},
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
-
- { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist
- },
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist
- },
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 9d38ddc8da49..866b5df36ed1 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -256,6 +256,10 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_WRITE_CACHE)
sdev->wce_default_on = 1;
+ /* A few buggy USB-ATA bridges don't understand FUA */
+ if (us->fflags & US_FL_BROKEN_FUA)
+ sdev->broken_fua = 1;
+
} else {
/* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 174a447868cd..80a5b366255f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1936,6 +1936,13 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Michael Büsch <m@bues.ch> */
+UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
* JMicron responds to USN and several other SCSI ioctls with a
* residue that causes subsequent I/O requests to fail. */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index be414d2b2b22..8dae2f724a35 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -17,6 +17,7 @@
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/net.h>
#include <linux/if_packet.h>
@@ -373,7 +374,7 @@ static void handle_tx(struct vhost_net *net)
% UIO_MAXIOV == nvq->done_idx))
break;
- head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
+ head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
@@ -505,7 +506,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = -ENOBUFS;
goto err;
}
- r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
if (unlikely(r < 0))
@@ -584,9 +585,9 @@ static void handle_rx(struct vhost_net *net)
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
- vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
- mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
+ mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
while ((sock_len = peek_head_len(sock->sk))) {
sock_len += sock_hlen;
@@ -701,16 +702,20 @@ static void handle_rx_net(struct vhost_work *work)
static int vhost_net_open(struct inode *inode, struct file *f)
{
- struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+ struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
int i;
- if (!n)
- return -ENOMEM;
+ n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!n) {
+ n = vmalloc(sizeof *n);
+ if (!n)
+ return -ENOMEM;
+ }
vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
- kfree(n);
+ kvfree(n);
return -ENOMEM;
}
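vhost_net_open() above switches to the try-kmalloc-then-vmalloc idiom for a structure that has grown too large for contiguous allocation to be reliable, and kvfree() (used in both the error path and the release path below) frees either kind. Condensed into a helper (a sketch; the driver open-codes it):

    #include <linux/mm.h>		/* kvfree() */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *alloc_large(size_t size)
    {
    	/* __GFP_NOWARN: a fallback exists, so don't log the failure;
    	 * __GFP_REPEAT: try a bit harder before giving up */
    	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

    	if (!p)
    		p = vmalloc(size);	/* virtually contiguous fallback */
    	return p;			/* free with kvfree() either way */
    }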
@@ -827,7 +832,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
* since jobs can re-queue themselves. */
vhost_net_flush(n);
kfree(n->dev.vqs);
- kfree(n);
+ kvfree(n);
return 0;
}
@@ -1038,15 +1043,13 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
mutex_unlock(&n->dev.mutex);
return -EFAULT;
}
- n->dev.acked_features = features;
- smp_wmb();
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
mutex_lock(&n->vqs[i].vq.mutex);
+ n->vqs[i].vq.acked_features = features;
n->vqs[i].vhost_hlen = vhost_hlen;
n->vqs[i].sock_hlen = sock_hlen;
mutex_unlock(&n->vqs[i].vq.mutex);
}
- vhost_net_flush(n);
mutex_unlock(&n->dev.mutex);
return 0;
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index aeb513108448..69906cacd04f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,7 +57,8 @@
#define TCM_VHOST_MAX_CDB_SIZE 32
#define TCM_VHOST_DEFAULT_TAGS 256
#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_PAGES 2048
+#define TCM_VHOST_PREALLOC_UPAGES 2048
+#define TCM_VHOST_PREALLOC_PROT_SGLS 512
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
@@ -79,10 +80,12 @@ struct tcm_vhost_cmd {
u64 tvc_tag;
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
+ u32 tvc_prot_sgl_count;
/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
u32 tvc_lun;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist *tvc_sgl;
+ struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response */
struct virtio_scsi_cmd_resp __user *tvc_resp;
@@ -166,7 +169,8 @@ enum {
};
enum {
- VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
+ VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
+ (1ULL << VIRTIO_SCSI_F_T10_PI)
};
#define VHOST_SCSI_MAX_TARGET 256
@@ -456,12 +460,16 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd);
struct se_session *se_sess = se_cmd->se_sess;
+ int i;
if (tv_cmd->tvc_sgl_count) {
- u32 i;
for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
}
+ if (tv_cmd->tvc_prot_sgl_count) {
+ for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
+ }
tcm_vhost_put_inflight(tv_cmd->inflight);
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -606,7 +614,7 @@ tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
again:
vhost_disable_notify(&vs->dev, vq);
- head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+ head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
if (head < 0) {
@@ -713,16 +721,14 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
static struct tcm_vhost_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq,
- struct tcm_vhost_tpg *tpg,
- struct virtio_scsi_cmd_req *v_req,
- u32 exp_data_len,
- int data_direction)
+vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
+ unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
+ u32 exp_data_len, int data_direction)
{
struct tcm_vhost_cmd *cmd;
struct tcm_vhost_nexus *tv_nexus;
struct se_session *se_sess;
- struct scatterlist *sg;
+ struct scatterlist *sg, *prot_sg;
struct page **pages;
int tag;
@@ -741,19 +747,24 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
sg = cmd->tvc_sgl;
+ prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
cmd->tvc_sgl = sg;
+ cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
- cmd->tvc_tag = v_req->tag;
- cmd->tvc_task_attr = v_req->task_attr;
+ cmd->tvc_tag = scsi_tag;
+ cmd->tvc_lun = lun;
+ cmd->tvc_task_attr = task_attr;
cmd->tvc_exp_data_len = exp_data_len;
cmd->tvc_data_direction = data_direction;
cmd->tvc_nexus = tv_nexus;
cmd->inflight = tcm_vhost_get_inflight(vq);
+ memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
+
return cmd;
}
@@ -767,35 +778,28 @@ vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
struct scatterlist *sgl,
unsigned int sgl_count,
struct iovec *iov,
- int write)
+ struct page **pages,
+ bool write)
{
unsigned int npages = 0, pages_nr, offset, nbytes;
struct scatterlist *sg = sgl;
void __user *ptr = iov->iov_base;
size_t len = iov->iov_len;
- struct page **pages;
int ret, i;
- if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
- pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
- " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
- sgl_count, TCM_VHOST_PREALLOC_SGLS);
- return -ENOBUFS;
- }
-
pages_nr = iov_num_pages(iov);
- if (pages_nr > sgl_count)
+ if (pages_nr > sgl_count) {
+ pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
+ " sgl_count: %u\n", pages_nr, sgl_count);
return -ENOBUFS;
-
- if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
+ }
+ if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
- " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
- pages_nr, TCM_VHOST_PREALLOC_PAGES);
+ " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
+ pages_nr, TCM_VHOST_PREALLOC_UPAGES);
return -ENOBUFS;
}
- pages = tv_cmd->tvc_upages;
-
ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
/* No pages were pinned */
if (ret < 0)
@@ -825,33 +829,32 @@ out:
static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
struct iovec *iov,
- unsigned int niov,
- int write)
+ int niov,
+ bool write)
{
- int ret;
- unsigned int i;
- u32 sgl_count;
- struct scatterlist *sg;
+ struct scatterlist *sg = cmd->tvc_sgl;
+ unsigned int sgl_count = 0;
+ int ret, i;
- /*
- * Find out how long sglist needs to be
- */
- sgl_count = 0;
for (i = 0; i < niov; i++)
sgl_count += iov_num_pages(&iov[i]);
- /* TODO overflow checking */
+ if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
+ pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
+ " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
+ sgl_count, TCM_VHOST_PREALLOC_SGLS);
+ return -ENOBUFS;
+ }
- sg = cmd->tvc_sgl;
pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
sg_init_table(sg, sgl_count);
-
cmd->tvc_sgl_count = sgl_count;
- pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
+ pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
+
for (i = 0; i < niov; i++) {
ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
- write);
+ cmd->tvc_upages, write);
if (ret < 0) {
for (i = 0; i < cmd->tvc_sgl_count; i++)
put_page(sg_page(&cmd->tvc_sgl[i]));
@@ -859,31 +862,70 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
cmd->tvc_sgl_count = 0;
return ret;
}
-
sg += ret;
sgl_count -= ret;
}
return 0;
}
+static int
+vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
+ struct iovec *iov,
+ int niov,
+ bool write)
+{
+ struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
+ unsigned int prot_sgl_count = 0;
+ int ret, i;
+
+ for (i = 0; i < niov; i++)
+ prot_sgl_count += iov_num_pages(&iov[i]);
+
+ if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
+ pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
+ " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
+ prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
+ return -ENOBUFS;
+ }
+
+ pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+ prot_sg, prot_sgl_count);
+ sg_init_table(prot_sg, prot_sgl_count);
+ cmd->tvc_prot_sgl_count = prot_sgl_count;
+
+ for (i = 0; i < niov; i++) {
+ ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
+ cmd->tvc_upages, write);
+ if (ret < 0) {
+ for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
+ put_page(sg_page(&cmd->tvc_prot_sgl[i]));
+
+ cmd->tvc_prot_sgl_count = 0;
+ return ret;
+ }
+ prot_sg += ret;
+ prot_sgl_count -= ret;
+ }
+ return 0;
+}
+
static void tcm_vhost_submission_work(struct work_struct *work)
{
struct tcm_vhost_cmd *cmd =
container_of(work, struct tcm_vhost_cmd, work);
struct tcm_vhost_nexus *tv_nexus;
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
- struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
- int rc, sg_no_bidi = 0;
+ struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
+ int rc;
+ /* FIXME: BIDI operation */
if (cmd->tvc_sgl_count) {
sg_ptr = cmd->tvc_sgl;
-/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
-#if 0
- if (se_cmd->se_cmd_flags & SCF_BIDI) {
- sg_bidi_ptr = NULL;
- sg_no_bidi = 0;
- }
-#endif
+
+ if (cmd->tvc_prot_sgl_count)
+ sg_prot_ptr = cmd->tvc_prot_sgl;
+ else
+ se_cmd->prot_pto = true;
} else {
sg_ptr = NULL;
}
@@ -894,7 +936,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
cmd->tvc_lun, cmd->tvc_exp_data_len,
cmd->tvc_task_attr, cmd->tvc_data_direction,
TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- sg_bidi_ptr, sg_no_bidi, NULL, 0);
+ NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -926,12 +968,18 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
struct tcm_vhost_tpg **vs_tpg;
struct virtio_scsi_cmd_req v_req;
+ struct virtio_scsi_cmd_req_pi v_req_pi;
struct tcm_vhost_tpg *tpg;
struct tcm_vhost_cmd *cmd;
- u32 exp_data_len, data_first, data_num, data_direction;
+ u64 tag;
+ u32 exp_data_len, data_first, data_num, data_direction, prot_first;
unsigned out, in, i;
- int head, ret;
- u8 target;
+ int head, ret, data_niov, prot_niov, prot_bytes;
+ size_t req_size;
+ u16 lun;
+ u8 *target, *lunp, task_attr;
+ bool hdr_pi;
+ void *req, *cdb;
mutex_lock(&vq->mutex);
/*
@@ -945,7 +993,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
for (;;) {
- head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+ head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
@@ -962,7 +1010,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
break;
}
-/* FIXME: BIDI operation */
+ /* FIXME: BIDI operation */
if (out == 1 && in == 1) {
data_direction = DMA_NONE;
data_first = 0;
@@ -992,29 +1040,38 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
break;
}
- if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
- vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
- " bytes\n", vq->iov[0].iov_len);
+ if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+ req = &v_req_pi;
+ lunp = &v_req_pi.lun[0];
+ target = &v_req_pi.lun[1];
+ req_size = sizeof(v_req_pi);
+ hdr_pi = true;
+ } else {
+ req = &v_req;
+ lunp = &v_req.lun[0];
+ target = &v_req.lun[1];
+ req_size = sizeof(v_req);
+ hdr_pi = false;
+ }
+
+ if (unlikely(vq->iov[0].iov_len < req_size)) {
+ pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
+ req_size, vq->iov[0].iov_len);
break;
}
- pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
- " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
- ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
- sizeof(v_req));
+ ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
if (unlikely(ret)) {
vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
break;
}
/* virtio-scsi spec requires byte 0 of the lun to be 1 */
- if (unlikely(v_req.lun[0] != 1)) {
+ if (unlikely(*lunp != 1)) {
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
}
- /* Extract the tpgt */
- target = v_req.lun[1];
- tpg = ACCESS_ONCE(vs_tpg[target]);
+ tpg = ACCESS_ONCE(vs_tpg[*target]);
/* Target does not exist, fail the request */
if (unlikely(!tpg)) {
@@ -1022,17 +1079,79 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
+ data_niov = data_num;
+ prot_niov = prot_first = prot_bytes = 0;
+ /*
+ * Determine if any protection information iovecs are preceeding
+ * the actual data payload, and adjust data_first + data_niov
+ * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+ *
+ * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+ */
+ if (hdr_pi) {
+ if (v_req_pi.pi_bytesout) {
+ if (data_direction != DMA_TO_DEVICE) {
+ vq_err(vq, "Received non zero do_pi_niov"
+ ", but wrong data_direction\n");
+ goto err_cmd;
+ }
+ prot_bytes = v_req_pi.pi_bytesout;
+ } else if (v_req_pi.pi_bytesin) {
+ if (data_direction != DMA_FROM_DEVICE) {
+ vq_err(vq, "Received non zero di_pi_niov"
+ ", but wrong data_direction\n");
+ goto err_cmd;
+ }
+ prot_bytes = v_req_pi.pi_bytesin;
+ }
+ if (prot_bytes) {
+ int tmp = 0;
+
+ for (i = 0; i < data_num; i++) {
+ tmp += vq->iov[data_first + i].iov_len;
+ prot_niov++;
+ if (tmp >= prot_bytes)
+ break;
+ }
+ prot_first = data_first;
+ data_first += prot_niov;
+ data_niov = data_num - prot_niov;
+ }
+ tag = v_req_pi.tag;
+ task_attr = v_req_pi.task_attr;
+ cdb = &v_req_pi.cdb[0];
+ lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
+ } else {
+ tag = v_req.tag;
+ task_attr = v_req.task_attr;
+ cdb = &v_req.cdb[0];
+ lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+ }
exp_data_len = 0;
- for (i = 0; i < data_num; i++)
+ for (i = 0; i < data_niov; i++)
exp_data_len += vq->iov[data_first + i].iov_len;
+ /*
+ * Check that the received CDB size does not exceed our
+ * hardcoded max for vhost-scsi
+ *
+ * TODO what if cdb was too small for varlen cdb header?
+ */
+ if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
+ vq_err(vq, "Received SCSI CDB with command_size: %d that"
+ " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+ scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
+ goto err_cmd;
+ }
- cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
- exp_data_len, data_direction);
+ cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
+ exp_data_len + prot_bytes,
+ data_direction);
if (IS_ERR(cmd)) {
vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
PTR_ERR(cmd));
goto err_cmd;
}
+
pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
": %d\n", cmd, exp_data_len, data_direction);
@@ -1040,40 +1159,28 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
cmd->tvc_vq = vq;
cmd->tvc_resp = vq->iov[out].iov_base;
- /*
- * Copy in the recieved CDB descriptor into cmd->tvc_cdb
- * that will be used by tcm_vhost_new_cmd_map() and down into
- * target_setup_cmd_from_cdb()
- */
- memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
- /*
- * Check that the recieved CDB size does not exceeded our
- * hardcoded max for tcm_vhost
- */
- /* TODO what if cdb was too small for varlen cdb header? */
- if (unlikely(scsi_command_size(cmd->tvc_cdb) >
- TCM_VHOST_MAX_CDB_SIZE)) {
- vq_err(vq, "Received SCSI CDB with command_size: %d that"
- " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
- scsi_command_size(cmd->tvc_cdb),
- TCM_VHOST_MAX_CDB_SIZE);
- goto err_free;
- }
- cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
-
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cmd->tvc_cdb[0], cmd->tvc_lun);
+ if (prot_niov) {
+ ret = vhost_scsi_map_iov_to_prot(cmd,
+ &vq->iov[prot_first], prot_niov,
+ data_direction == DMA_FROM_DEVICE);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to map iov to"
+ " prot_sgl\n");
+ goto err_free;
+ }
+ }
if (data_direction != DMA_NONE) {
ret = vhost_scsi_map_iov_to_sgl(cmd,
- &vq->iov[data_first], data_num,
+ &vq->iov[data_first], data_niov,
data_direction == DMA_FROM_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
goto err_free;
}
}
-
/*
* Save the descriptor from vhost_get_vq_desc() to be used to
* complete the virtio-scsi request in TCM callback context via
@@ -1373,6 +1480,9 @@ err_dev:
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
+ struct vhost_virtqueue *vq;
+ int i;
+
if (features & ~VHOST_SCSI_FEATURES)
return -EOPNOTSUPP;
@@ -1382,21 +1492,17 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
mutex_unlock(&vs->dev.mutex);
return -EFAULT;
}
- vs->dev.acked_features = features;
- smp_wmb();
- vhost_scsi_flush(vs);
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
mutex_unlock(&vs->dev.mutex);
return 0;
}
-static void vhost_scsi_free(struct vhost_scsi *vs)
-{
- if (is_vmalloc_addr(vs))
- vfree(vs);
- else
- kfree(vs);
-}
-
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
@@ -1436,7 +1542,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
return 0;
err_vqs:
- vhost_scsi_free(vs);
+ kvfree(vs);
err_vs:
return r;
}
@@ -1455,7 +1561,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
vhost_scsi_flush(vs);
kfree(vs->dev.vqs);
- vhost_scsi_free(vs);
+ kvfree(vs);
return 0;
}
@@ -1591,10 +1697,6 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
return;
mutex_lock(&vs->dev.mutex);
- if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
- mutex_unlock(&vs->dev.mutex);
- return;
- }
if (plug)
reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
@@ -1603,8 +1705,9 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
mutex_lock(&vq->mutex);
- tcm_vhost_send_evt(vs, tpg, lun,
- VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+ if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
+ tcm_vhost_send_evt(vs, tpg, lun,
+ VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
mutex_unlock(&vq->mutex);
mutex_unlock(&vs->dev.mutex);
}
@@ -1712,6 +1815,7 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
kfree(tv_cmd->tvc_sgl);
+ kfree(tv_cmd->tvc_prot_sgl);
kfree(tv_cmd->tvc_upages);
}
}
@@ -1746,7 +1850,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
tv_nexus->tvn_se_sess = transport_init_session_tags(
TCM_VHOST_DEFAULT_TAGS,
sizeof(struct tcm_vhost_cmd),
- TARGET_PROT_NORMAL);
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
@@ -1765,12 +1869,20 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
}
tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
- TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
+ TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
if (!tv_cmd->tvc_upages) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_upages\n");
goto out;
}
+
+ tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
+ TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
+ if (!tv_cmd->tvc_prot_sgl) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+ goto out;
+ }
}
/*
* Since we are running in 'demo mode' this call will generate a
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index c2a54fbf7f99..d9c501eaa6c3 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -53,7 +53,7 @@ static void handle_vq(struct vhost_test *n)
vhost_disable_notify(&n->dev, vq);
for (;;) {
- head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
+ head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
@@ -241,15 +241,18 @@ done:
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
+ struct vhost_virtqueue *vq;
+
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&n->dev)) {
mutex_unlock(&n->dev.mutex);
return -EFAULT;
}
- n->dev.acked_features = features;
- smp_wmb();
- vhost_test_flush(n);
+ vq = &n->vqs[VHOST_TEST_VQ];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
mutex_unlock(&n->dev.mutex);
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 78987e481bc6..c90f4374442a 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -18,7 +18,6 @@
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
-#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
@@ -191,6 +190,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->log_used = false;
vq->log_addr = -1ull;
vq->private_data = NULL;
+ vq->acked_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->error = NULL;
@@ -198,6 +198,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
+ vq->memory = NULL;
}
static int vhost_worker(void *data)
@@ -415,11 +416,18 @@ EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
{
+ int i;
+
vhost_dev_cleanup(dev, true);
/* Restore memory to default empty mapping. */
memory->nregions = 0;
- RCU_INIT_POINTER(dev->memory, memory);
+ dev->memory = memory;
+ /* We don't need VQ locks below since vhost_dev_cleanup makes sure
+ * VQs aren't running.
+ */
+ for (i = 0; i < dev->nvqs; ++i)
+ dev->vqs[i]->memory = memory;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
@@ -462,10 +470,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(rcu_dereference_protected(dev->memory,
- locked ==
- lockdep_is_held(&dev->mutex)));
- RCU_INIT_POINTER(dev->memory, NULL);
+ kfree(dev->memory);
+ dev->memory = NULL;
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
kthread_stop(dev->worker);
@@ -524,11 +530,13 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
for (i = 0; i < d->nvqs; ++i) {
int ok;
+ bool log;
+
mutex_lock(&d->vqs[i]->mutex);
+ log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
- ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
- log_all);
+ ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
else
ok = 1;
mutex_unlock(&d->vqs[i]->mutex);
@@ -538,12 +546,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
-static int vq_access_ok(struct vhost_dev *d, unsigned int num,
+static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
- size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -555,26 +563,19 @@ static int vq_access_ok(struct vhost_dev *d, unsigned int num,
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
- struct vhost_memory *mp;
-
- mp = rcu_dereference_protected(dev->memory,
- lockdep_is_held(&dev->mutex));
- return memory_access_ok(dev, mp, 1);
+ return memory_access_ok(dev, dev->memory, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+static int vq_log_access_ok(struct vhost_virtqueue *vq,
void __user *log_base)
{
- struct vhost_memory *mp;
- size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
- mp = rcu_dereference_protected(vq->dev->memory,
- lockdep_is_held(&vq->mutex));
- return vq_memory_access_ok(log_base, mp,
- vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+ return vq_memory_access_ok(log_base, vq->memory,
+ vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
vq->num * sizeof *vq->used->ring + s));
@@ -584,8 +585,8 @@ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq->dev, vq, vq->log_base);
+ return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
+ vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
@@ -593,6 +594,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
unsigned long size = offsetof(struct vhost_memory, regions);
+ int i;
if (copy_from_user(&mem, m, size))
return -EFAULT;
@@ -611,15 +613,19 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
}
- if (!memory_access_ok(d, newmem,
- vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+ if (!memory_access_ok(d, newmem, 0)) {
kfree(newmem);
return -EFAULT;
}
- oldmem = rcu_dereference_protected(d->memory,
- lockdep_is_held(&d->mutex));
- rcu_assign_pointer(d->memory, newmem);
- synchronize_rcu();
+ oldmem = d->memory;
+ d->memory = newmem;
+
+ /* All memory accesses are done under some VQ mutex. */
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->memory = newmem;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
kfree(oldmem);
return 0;
}
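The pointer swap above replaces RCU with a lock-pass: after d->memory is updated, taking and releasing every vq mutex once guarantees that any reader which started with the old table has finished (readers only touch the table under a vq mutex), so a plain kfree() needs no grace period. The idiom in isolation (illustrative names):

    void *old = d->memory;

    d->memory = newmem;
    for (i = 0; i < d->nvqs; ++i) {
    	/* a reader inside this mutex either already sees newmem or is
    	 * flushed out by this lock/unlock pair */
    	mutex_lock(&d->vqs[i]->mutex);
    	d->vqs[i]->memory = newmem;
    	mutex_unlock(&d->vqs[i]->mutex);
    }
    kfree(old);	/* no vq can still reference the old table */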
@@ -718,7 +724,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
* If it is not, we don't as size might not have been setup.
* We will verify when backend is configured. */
if (vq->private_data) {
- if (!vq_access_ok(d, vq->num,
+ if (!vq_access_ok(vq, vq->num,
(void __user *)(unsigned long)a.desc_user_addr,
(void __user *)(unsigned long)a.avail_user_addr,
(void __user *)(unsigned long)a.used_user_addr)) {
@@ -858,7 +864,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
vq = d->vqs[i];
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
- if (vq->private_data && !vq_log_access_ok(d, vq, base))
+ if (vq->private_data && !vq_log_access_ok(vq, base))
r = -EFAULT;
else
vq->log_base = base;
@@ -1044,7 +1050,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_init_used);
-static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
@@ -1053,9 +1059,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
u64 s = 0;
int ret = 0;
- rcu_read_lock();
-
- mem = rcu_dereference(dev->memory);
+ mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
@@ -1077,7 +1081,6 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
++ret;
}
- rcu_read_unlock();
return ret;
}
@@ -1102,7 +1105,7 @@ static unsigned next_desc(struct vring_desc *desc)
return next;
}
-static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+static int get_indirect(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
@@ -1121,7 +1124,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
return -EINVAL;
}
- ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
+ ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect,
UIO_MAXIOV);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d in indirect.\n", ret);
@@ -1161,7 +1164,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
return -EINVAL;
}
- ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+ ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d indirect idx %d\n",
@@ -1198,7 +1201,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error. */
-int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
@@ -1272,7 +1275,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
return -EFAULT;
}
if (desc.flags & VRING_DESC_F_INDIRECT) {
- ret = get_indirect(dev, vq, iov, iov_size,
+ ret = get_indirect(vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
@@ -1283,7 +1286,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
continue;
}
- ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+ ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d descriptor idx %d\n",
@@ -1426,11 +1429,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
* interrupts. */
smp_mb();
- if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
return true;
- if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__u16 flags;
if (__get_user(flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
@@ -1491,7 +1494,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
- if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r) {
vq_err(vq, "Failed to enable notification at %p: %d\n",
@@ -1528,7 +1531,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
- if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r)
vq_err(vq, "Failed to enable notification at %p: %d\n",
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 35eeb2a1bada..3eda654b8f5a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -104,20 +104,18 @@ struct vhost_virtqueue {
struct iovec *indirect;
struct vring_used_elem *heads;
/* Protected by virtqueue mutex. */
+ struct vhost_memory *memory;
void *private_data;
+ unsigned acked_features;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
};
struct vhost_dev {
- /* Readers use RCU to access memory table pointer
- * log base pointer and features.
- * Writers use mutex below.*/
- struct vhost_memory __rcu *memory;
+ struct vhost_memory *memory;
struct mm_struct *mm;
struct mutex mutex;
- unsigned acked_features;
struct vhost_virtqueue **vqs;
int nvqs;
struct file *log_file;
@@ -140,7 +138,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
-int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
+int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num);
@@ -174,13 +172,8 @@ enum {
(1ULL << VHOST_F_LOG_ALL),
};
-static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
+static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
- unsigned acked_features;
-
- /* TODO: check that we are running from vhost_worker or dev mutex is
- * held? */
- acked_features = rcu_dereference_index_check(dev->acked_features, 1);
- return acked_features & (1 << bit);
+ return vq->acked_features & (1 << bit);
}
#endif
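With acked_features now per-virtqueue, vhost_has_feature() is a plain field read: vq handlers already run under vq->mutex, and the ioctl writers above update the copy under the same mutex, so neither RCU nor barriers are needed. A caller's-eye sketch mirroring the vq_access_ok() arithmetic earlier in the diff (illustrative helper, not in the tree):

    #include "vhost.h"

    /* called with vq->mutex held, so the feature read cannot race an update */
    static size_t avail_ring_bytes(struct vhost_virtqueue *vq)
    {
    	/* VIRTIO_RING_F_EVENT_IDX appends a u16 used_event after the ring */
    	size_t extra = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

    	return sizeof(*vq->avail) + vq->num * sizeof(*vq->avail->ring) + extra;
    }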
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index c7b4f0f927b1..8bf495ffb020 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -20,6 +20,7 @@ source "drivers/char/agp/Kconfig"
source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
+source "drivers/gpu/ipu-v3/Kconfig"
menu "Direct Rendering Manager"
source "drivers/gpu/drm/Kconfig"
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 5a3eb2ecb525..5d449059a556 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -29,7 +29,7 @@ if LCD_CLASS_DEVICE
config LCD_CORGI
tristate "LCD Panel support for SHARP corgi/spitz model"
- depends on SPI_MASTER && PXA_SHARPSL
+ depends on SPI_MASTER && PXA_SHARPSL && BACKLIGHT_CLASS_DEVICE
help
Say y here to support the LCD panels usually found on SHARP
corgi (C7x0) and spitz (Cxx00) models.
@@ -370,7 +370,7 @@ config BACKLIGHT_AAT2870
config BACKLIGHT_LM3630A
tristate "Backlight Driver for LM3630A"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on BACKLIGHT_CLASS_DEVICE && I2C && PWM
select REGMAP_I2C
help
This supports TI LM3630A Backlight Driver
@@ -386,14 +386,14 @@ config BACKLIGHT_LM3639
config BACKLIGHT_LP855X
tristate "Backlight driver for TI LP855X"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
+ depends on BACKLIGHT_CLASS_DEVICE && I2C && PWM
help
This supports TI LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
LP8557 backlight driver.
config BACKLIGHT_LP8788
tristate "Backlight driver for TI LP8788 MFD"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788 && PWM
help
This supports TI LP8788 backlight driver.
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index a2eba12e1cb7..1cea68848f1a 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -38,7 +38,8 @@ static int gpio_backlight_update_status(struct backlight_device *bl)
bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
brightness = 0;
- gpio_set_value(gbl->gpio, brightness ? gbl->active : !gbl->active);
+ gpio_set_value_cansleep(gbl->gpio,
+ brightness ? gbl->active : !gbl->active);
return 0;
}
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b75201ff46f6..38ca88bc5c3e 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -10,8 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -32,8 +32,7 @@ struct pwm_bl_data {
unsigned int *levels;
bool enabled;
struct regulator *power_supply;
- int enable_gpio;
- unsigned long enable_gpio_flags;
+ struct gpio_desc *enable_gpio;
unsigned int scale;
int (*notify)(struct device *,
int brightness);
@@ -54,12 +53,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
if (err < 0)
dev_err(pb->dev, "failed to enable power supply\n");
- if (gpio_is_valid(pb->enable_gpio)) {
- if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
- gpio_set_value(pb->enable_gpio, 0);
- else
- gpio_set_value(pb->enable_gpio, 1);
- }
+ if (pb->enable_gpio)
+ gpiod_set_value(pb->enable_gpio, 1);
pwm_enable(pb->pwm);
pb->enabled = true;
@@ -73,12 +68,8 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
pwm_config(pb->pwm, 0, pb->period);
pwm_disable(pb->pwm);
- if (gpio_is_valid(pb->enable_gpio)) {
- if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
- gpio_set_value(pb->enable_gpio, 1);
- else
- gpio_set_value(pb->enable_gpio, 0);
- }
+ if (pb->enable_gpio)
+ gpiod_set_value(pb->enable_gpio, 0);
regulator_disable(pb->power_supply);
pb->enabled = false;
@@ -148,7 +139,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
struct platform_pwm_backlight_data *data)
{
struct device_node *node = dev->of_node;
- enum of_gpio_flags flags;
struct property *prop;
int length;
u32 value;
@@ -189,14 +179,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
data->max_brightness--;
}
- data->enable_gpio = of_get_named_gpio_flags(node, "enable-gpios", 0,
- &flags);
- if (data->enable_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (gpio_is_valid(data->enable_gpio) && (flags & OF_GPIO_ACTIVE_LOW))
- data->enable_gpio_flags |= PWM_BACKLIGHT_GPIO_ACTIVE_LOW;
-
return 0;
}
@@ -256,8 +238,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
} else
pb->scale = data->max_brightness;
- pb->enable_gpio = data->enable_gpio;
- pb->enable_gpio_flags = data->enable_gpio_flags;
pb->notify = data->notify;
pb->notify_after = data->notify_after;
pb->check_fb = data->check_fb;
@@ -265,26 +245,38 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->dev = &pdev->dev;
pb->enabled = false;
- if (gpio_is_valid(pb->enable_gpio)) {
- unsigned long flags;
-
- if (pb->enable_gpio_flags & PWM_BACKLIGHT_GPIO_ACTIVE_LOW)
- flags = GPIOF_OUT_INIT_HIGH;
+ pb->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
+ if (IS_ERR(pb->enable_gpio)) {
+ ret = PTR_ERR(pb->enable_gpio);
+ if (ret == -ENOENT)
+ pb->enable_gpio = NULL;
else
- flags = GPIOF_OUT_INIT_LOW;
+ goto err_alloc;
+ }
- ret = gpio_request_one(pb->enable_gpio, flags, "enable");
+ /*
+ * Compatibility fallback for drivers still using the integer GPIO
+ * platform data. Must go away soon.
+ */
+ if (!pb->enable_gpio && gpio_is_valid(data->enable_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, data->enable_gpio,
+ GPIOF_OUT_INIT_HIGH, "enable");
if (ret < 0) {
dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n",
- pb->enable_gpio, ret);
+ data->enable_gpio, ret);
goto err_alloc;
}
+
+ pb->enable_gpio = gpio_to_desc(data->enable_gpio);
}
+ if (pb->enable_gpio)
+ gpiod_direction_output(pb->enable_gpio, 1);
+
pb->power_supply = devm_regulator_get(&pdev->dev, "power");
if (IS_ERR(pb->power_supply)) {
ret = PTR_ERR(pb->power_supply);
- goto err_gpio;
+ goto err_alloc;
}
pb->pwm = devm_pwm_get(&pdev->dev, NULL);
@@ -295,7 +287,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (IS_ERR(pb->pwm)) {
dev_err(&pdev->dev, "unable to request legacy PWM\n");
ret = PTR_ERR(pb->pwm);
- goto err_gpio;
+ goto err_alloc;
}
}
@@ -304,12 +296,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
/*
* The DT case will set the pwm_period_ns field to 0 and store the
* period, parsed from the DT, in the PWM device. For the non-DT case,
- * set the period from platform data.
+ * set the period from platform data if it has not already been set
+ * via the PWM lookup table.
*/
- if (data->pwm_period_ns > 0)
+ pb->period = pwm_get_period(pb->pwm);
+ if (!pb->period && (data->pwm_period_ns > 0)) {
+ pb->period = data->pwm_period_ns;
pwm_set_period(pb->pwm, data->pwm_period_ns);
+ }
- pb->period = pwm_get_period(pb->pwm);
pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
memset(&props, 0, sizeof(struct backlight_properties));
@@ -320,7 +315,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
ret = PTR_ERR(bl);
- goto err_gpio;
+ goto err_alloc;
}
if (data->dft_brightness > data->max_brightness) {
@@ -336,9 +331,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
return 0;
-err_gpio:
- if (gpio_is_valid(pb->enable_gpio))
- gpio_free(pb->enable_gpio);
err_alloc:
if (data->exit)
data->exit(&pdev->dev);
@@ -359,6 +351,14 @@ static int pwm_backlight_remove(struct platform_device *pdev)
return 0;
}
+static void pwm_backlight_shutdown(struct platform_device *pdev)
+{
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
+
+ pwm_backlight_power_off(pb);
+}
+
#ifdef CONFIG_PM_SLEEP
static int pwm_backlight_suspend(struct device *dev)
{
@@ -404,6 +404,7 @@ static struct platform_driver pwm_backlight_driver = {
},
.probe = pwm_backlight_probe,
.remove = pwm_backlight_remove,
+ .shutdown = pwm_backlight_shutdown,
};
module_platform_driver(pwm_backlight_driver);
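A minimal sketch of the gpiod request pattern the conversion above settles
on, shown for a hypothetical consumer driver; later kernels also provide
devm_gpiod_get_optional(), which folds the -ENOENT check away:

	struct gpio_desc *enable;

	enable = devm_gpiod_get(dev, "enable");
	if (IS_ERR(enable)) {
		if (PTR_ERR(enable) != -ENOENT)
			return PTR_ERR(enable);	/* e.g. -EPROBE_DEFER */
		enable = NULL;			/* absent: GPIO is optional */
	}
	if (enable)
		gpiod_direction_output(enable, 1);	/* assert "enabled" */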
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 510a1bcf76f1..2d6d48196c6d 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -703,7 +703,7 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
struct s6e63m0 *lcd = dev_get_drvdata(dev);
char temp[3];
- sprintf(temp, "%d\n", lcd->gamma_table_count);
+ sprintf(temp, "%u\n", lcd->gamma_table_count);
strcpy(buf, temp);
return strlen(buf);
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index b63860f7beab..40bec8d64b0a 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -77,3 +77,4 @@ const struct consw dummy_con = {
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
};
+EXPORT_SYMBOL_GPL(dummy_con);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f267284b423b..6e6aa704fe84 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1441,5 +1441,6 @@ const struct consw vga_con = {
.con_build_attr = vgacon_build_attr,
.con_invert_region = vgacon_invert_region,
};
+EXPORT_SYMBOL(vga_con);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index e683b6ef9594..d36e830d6fc6 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1057,6 +1057,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
goto put_display_node;
}
+ INIT_LIST_HEAD(&pdata->pwr_gpios);
ret = -ENOMEM;
for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
@@ -1082,6 +1083,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
dev_err(dev, "set direction output gpio %d failed\n", gpio);
goto put_display_node;
}
+ list_add(&og->list, &pdata->pwr_gpios);
}
if (is_gpio_power)
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index a54f7f7d763b..8fe41caac38e 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
@@ -408,7 +408,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
/* Workaround "PPI Does Not Start Properly In Specific Mode" */
if (ANOMALY_05000400) {
ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW,
- "PPI0_FS3")
+ "PPI0_FS3");
if (ret) {
dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
ret = -EBUSY;
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 7d44d669d5b6..43a0a52fc527 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
-#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
-
-static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
-{
- u32 bpp = info->var.bits_per_pixel;
-
- return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
-}
-
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
@@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
mask <<= info->var.transp.offset;
value |= mask;
}
- pal[regno] = offb_cmap_byteswap(info, value);
+ pal[regno] = value;
return 0;
}
diff --git a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
index 99af9e88b2d8..2f0822ee3ff9 100644
--- a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
@@ -121,9 +121,11 @@ static void __init omapdss_add_to_list(struct device_node *node, bool root)
{
struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node),
GFP_KERNEL);
- n->node = node;
- n->root = root;
- list_add(&n->list, &dss_conv_list);
+ if (n) {
+ n->node = node;
+ n->root = root;
+ list_add(&n->list, &dss_conv_list);
+ }
}
static bool __init omapdss_list_contains(const struct device_node *node)
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 1501979099dc..c2c8eb668784 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1215,7 +1215,7 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev,
}
/* Prepare the device_attr for registration with sysfs later */
-static DEVICE_ATTR(crt_src, 0666, sm501fb_crtsrc_show, sm501fb_crtsrc_store);
+static DEVICE_ATTR(crt_src, 0664, sm501fb_crtsrc_show, sm501fb_crtsrc_store);
/* sm501fb_show_regs
*
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index a8f2b280f796..a1134c3f6c11 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -474,8 +474,6 @@ static int vt8500lcd_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- kfree(fbi);
-
return 0;
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 1e443629f76d..4d08f45a9c29 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -865,4 +865,19 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+/*
+ * This should prevent the device from being used, allowing drivers to
+ * recover. You may need to grab appropriate locks to flush.
+ */
+void virtio_break_device(struct virtio_device *dev)
+{
+ struct virtqueue *_vq;
+
+ list_for_each_entry(_vq, &dev->vqs, list) {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ vq->broken = true;
+ }
+}
+EXPORT_SYMBOL_GPL(virtio_break_device);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 67b067a3e2ab..a5df5e89d456 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -66,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
udelay(100);
}
- return !!(reg_val & MXC_W1_CONTROL_PST);
+ return !(reg_val & MXC_W1_CONTROL_PST);
}
/*
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 74ec8fc5cc03..76dd54122f76 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -272,7 +272,7 @@ config PNX4008_WATCHDOG
config IOP_WATCHDOG
tristate "IOP Watchdog"
- depends on PLAT_IOP
+ depends on ARCH_IOP13XX
select WATCHDOG_NOWAYOUT if (ARCH_IOP32X || ARCH_IOP33X)
help
Say Y here to include support for the watchdog timer
@@ -378,6 +378,8 @@ config MAX63XX_WATCHDOG
config IMX2_WDT
tristate "IMX2+ Watchdog"
depends on ARCH_MXC
+ select REGMAP_MMIO
+ select WATCHDOG_CORE
help
This is the driver for the hardware watchdog
on the Freescale IMX2 and later processors.
@@ -663,6 +665,19 @@ config INTEL_SCU_WATCHDOG
To compile this driver as a module, choose M here.
+config INTEL_MID_WATCHDOG
+ tristate "Intel MID Watchdog Timer"
+ depends on X86_INTEL_MID
+ select WATCHDOG_CORE
+ ---help---
+ Watchdog timer driver built into the Intel SCU for Intel MID
+ Platforms.
+
+	  This driver currently supports only the watchdog evolution
+	  implementation in the SCU, available on the Merrifield generation.
+
+ To compile this driver as a module, choose M here.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
@@ -835,7 +850,7 @@ config 60XX_WDT
config SBC8360_WDT
tristate "SBC8360 Watchdog Timer"
- depends on X86
+ depends on X86_32
---help---
This is the driver for the hardware watchdog on the SBC8360 Single
@@ -938,36 +953,6 @@ config W83627HF_WDT
Most people will say N.
-config W83697HF_WDT
- tristate "W83697HF/W83697HG Watchdog Timer"
- depends on X86
- ---help---
- This is the driver for the hardware watchdog on the W83697HF/HG
- chipset as used in Dedibox/VIA motherboards (and likely others).
- This watchdog simply watches your kernel to make sure it doesn't
- freeze, and if it does, it reboots your computer after a certain
- amount of time.
-
- To compile this driver as a module, choose M here: the
- module will be called w83697hf_wdt.
-
- Most people will say N.
-
-config W83697UG_WDT
- tristate "W83697UG/W83697UF Watchdog Timer"
- depends on X86
- ---help---
- This is the driver for the hardware watchdog on the W83697UG/UF
- chipset as used in MSI Fuzzy CX700 VIA motherboards (and likely others).
- This watchdog simply watches your kernel to make sure it doesn't
- freeze, and if it does, it reboots your computer after a certain
- amount of time.
-
- To compile this driver as a module, choose M here: the
- module will be called w83697ug_wdt.
-
- Most people will say N.
-
config W83877F_WDT
tristate "W83877F (EMACS) Watchdog Timer"
depends on X86
@@ -1295,14 +1280,17 @@ config WATCHDOG_RTAS
# S390 Architecture
-config ZVM_WATCHDOG
- tristate "z/VM Watchdog Timer"
+config DIAG288_WATCHDOG
+ tristate "System z diag288 Watchdog"
depends on S390
+ select WATCHDOG_CORE
help
IBM s/390 and zSeries machines running under z/VM 5.1 or later
provide a virtual watchdog timer to their guests that causes a
user-defined Control Program command to be executed after a
timeout.
+ LPAR provides a very similar interface. This driver handles
+ both.
To compile this driver as a module, choose M here. The module
will be called diag288_wdt.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 1b5f3d5efad5..468c3204c3b1 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -107,13 +107,12 @@ obj-$(CONFIG_SMSC_SCH311X_WDT) += sch311x_wdt.o
obj-$(CONFIG_SMSC37B787_WDT) += smsc37b787_wdt.o
obj-$(CONFIG_VIA_WDT) += via_wdt.o
obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
-obj-$(CONFIG_W83697HF_WDT) += w83697hf_wdt.o
-obj-$(CONFIG_W83697UG_WDT) += w83697ug_wdt.o
obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
+obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
# M32R Architecture
@@ -154,6 +153,7 @@ obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
# S390 Architecture
+obj-$(CONFIG_DIAG288_WATCHDOG) += diag288_wdt.o
# SUPERH (sh + sh64) Architecture
obj-$(CONFIG_SH_WDT) += shwdt.o
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 399c3fddecf6..41ac4660fb89 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -20,6 +20,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
@@ -90,6 +91,15 @@ static inline void ath79_wdt_keepalive(void)
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
+
+ /*
+ * Updating the TIMER register requires a few microseconds
+ * on the AR934x SoCs at least. Use a small delay to ensure
+ * that the TIMER register is updated within the hardware
+ * before enabling the watchdog.
+ */
+ udelay(2);
+
ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
/* flush write */
ath79_wdt_rr(WDOG_REG_CTRL);
@@ -255,7 +265,7 @@ static int ath79_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
- err = clk_enable(wdt_clk);
+ err = clk_prepare_enable(wdt_clk);
if (err)
return err;
@@ -286,14 +296,14 @@ static int ath79_wdt_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- clk_disable(wdt_clk);
+ clk_disable_unprepare(wdt_clk);
return err;
}
static int ath79_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ath79_wdt_miscdev);
- clk_disable(wdt_clk);
+ clk_disable_unprepare(wdt_clk);
return 0;
}
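The clk API change above follows the common clock framework rule that a clock
must be prepared before it may be enabled; clk_prepare_enable() and
clk_disable_unprepare() bundle the two steps. A minimal sketch of the pairing,
as used in the probe/remove paths above:

	err = clk_prepare_enable(wdt_clk);	/* prepare (may sleep) + enable */
	if (err)
		return err;
	/* ... use the clock ... */
	clk_disable_unprepare(wdt_clk);		/* disable, then unprepare */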
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index a8dbceb32914..08a785398eac 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -41,6 +41,28 @@ u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
#define WDTP_MASK (TCR_WP_MASK)
#endif
+/* Checks the wdt=x and wdt_period=xx command-line options */
+notrace int __init early_parse_wdt(char *p)
+{
+ if (p && strncmp(p, "0", 1) != 0)
+ booke_wdt_enabled = 1;
+
+ return 0;
+}
+early_param("wdt", early_parse_wdt);
+
+int __init early_parse_wdt_period(char *p)
+{
+ unsigned long ret;
+ if (p) {
+ if (!kstrtol(p, 0, &ret))
+ booke_wdt_period = ret;
+ }
+
+ return 0;
+}
+early_param("wdt_period", early_parse_wdt_period);
+
#ifdef CONFIG_PPC_FSL_BOOK3E
/* For the specified period, determine the number of seconds
@@ -103,17 +125,18 @@ static unsigned int sec_to_period(unsigned int secs)
static void __booke_wdt_set(void *data)
{
u32 val;
+ struct watchdog_device *wdog = data;
val = mfspr(SPRN_TCR);
val &= ~WDTP_MASK;
- val |= WDTP(booke_wdt_period);
+ val |= WDTP(sec_to_period(wdog->timeout));
mtspr(SPRN_TCR, val);
}
-static void booke_wdt_set(void)
+static void booke_wdt_set(void *data)
{
- on_each_cpu(__booke_wdt_set, NULL, 0);
+ on_each_cpu(__booke_wdt_set, data, 0);
}
static void __booke_wdt_ping(void *data)
@@ -131,12 +154,13 @@ static int booke_wdt_ping(struct watchdog_device *wdog)
static void __booke_wdt_enable(void *data)
{
u32 val;
+ struct watchdog_device *wdog = data;
/* clear status before enabling watchdog */
__booke_wdt_ping(NULL);
val = mfspr(SPRN_TCR);
val &= ~WDTP_MASK;
- val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
+ val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(sec_to_period(wdog->timeout)));
mtspr(SPRN_TCR, val);
}
@@ -162,25 +186,17 @@ static void __booke_wdt_disable(void *data)
}
-static void __booke_wdt_start(struct watchdog_device *wdog)
+static int booke_wdt_start(struct watchdog_device *wdog)
{
- on_each_cpu(__booke_wdt_enable, NULL, 0);
+ on_each_cpu(__booke_wdt_enable, wdog, 0);
pr_debug("watchdog enabled (timeout = %u sec)\n", wdog->timeout);
-}
-static int booke_wdt_start(struct watchdog_device *wdog)
-{
- if (booke_wdt_enabled == 0) {
- booke_wdt_enabled = 1;
- __booke_wdt_start(wdog);
- }
return 0;
}
static int booke_wdt_stop(struct watchdog_device *wdog)
{
on_each_cpu(__booke_wdt_disable, NULL, 0);
- booke_wdt_enabled = 0;
pr_debug("watchdog disabled\n");
return 0;
@@ -191,9 +207,8 @@ static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
{
if (timeout > MAX_WDT_TIMEOUT)
return -EINVAL;
- booke_wdt_period = sec_to_period(timeout);
wdt_dev->timeout = timeout;
- booke_wdt_set();
+ booke_wdt_set(wdt_dev);
return 0;
}
@@ -231,10 +246,10 @@ static int __init booke_wdt_init(void)
pr_info("powerpc book-e watchdog driver loaded\n");
booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value;
booke_wdt_set_timeout(&booke_wdt_dev,
- period_to_sec(CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT));
+ period_to_sec(booke_wdt_period));
watchdog_set_nowayout(&booke_wdt_dev, nowayout);
if (booke_wdt_enabled)
- __booke_wdt_start(&booke_wdt_dev);
+ booke_wdt_start(&booke_wdt_dev);
ret = watchdog_register_device(&booke_wdt_dev);
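With the parsing moved into the driver, the existing boot parameters keep
working; an illustrative command line (values are examples only; wdt_period
takes the raw hardware period value that period_to_sec() converts at init):

	wdt=1 wdt_period=20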
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
new file mode 100644
index 000000000000..429494b6c822
--- /dev/null
+++ b/drivers/watchdog/diag288_wdt.c
@@ -0,0 +1,316 @@
+/*
+ * Watchdog driver for z/VM and LPAR using the diag 288 interface.
+ *
+ * Under z/VM, expiration of the watchdog will send a "system restart" command
+ * to CP.
+ *
+ * The command can be altered using the module parameter "cmd". This is
+ * not recommended because it's only supported on z/VM but not with LPAR.
+ *
+ * On LPAR, the watchdog will always trigger a system restart. The module
+ * parameter cmd is meaningless here.
+ *
+ *
+ * Copyright IBM Corp. 2004, 2013
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Philipp Hachtmann (phacht@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "diag288_wdt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/suspend.h>
+#include <asm/ebcdic.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#define MAX_CMDLEN 240
+#define DEFAULT_CMD "SYSTEM RESTART"
+
+#define MIN_INTERVAL 15 /* Minimum time supported by diag288 */
+#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */
+
+#define WDT_DEFAULT_TIMEOUT 30
+
+/* Function codes - init, change, cancel */
+#define WDT_FUNC_INIT 0
+#define WDT_FUNC_CHANGE 1
+#define WDT_FUNC_CANCEL 2
+#define WDT_FUNC_CONCEAL 0x80000000
+
+/* Action codes for LPAR watchdog */
+#define LPARWDT_RESTART 0
+
+static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
+static bool conceal_on;
+static bool nowayout_info = WATCHDOG_NOWAYOUT;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_AUTHOR("Philipp Hachtmann <phacht@de.ibm.com>");
+
+MODULE_DESCRIPTION("System z diag288 Watchdog Timer");
+
+module_param_string(cmd, wdt_cmd, MAX_CMDLEN, 0644);
+MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers (z/VM only)");
+
+module_param_named(conceal, conceal_on, bool, 0644);
+MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is active (z/VM only)");
+
+module_param_named(nowayout, nowayout_info, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");
+
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("vmwatchdog");
+
+static int __diag288(unsigned int func, unsigned int timeout,
+ unsigned long action, unsigned int len)
+{
+ register unsigned long __func asm("2") = func;
+ register unsigned long __timeout asm("3") = timeout;
+ register unsigned long __action asm("4") = action;
+ register unsigned long __len asm("5") = len;
+ int err;
+
+ err = -EINVAL;
+ asm volatile(
+ " diag %1, %3, 0x288\n"
+ "0: la %0, 0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (err) : "d"(__func), "d"(__timeout),
+ "d"(__action), "d"(__len) : "1", "cc");
+ return err;
+}
+
+static int __diag288_vm(unsigned int func, unsigned int timeout,
+ char *cmd, size_t len)
+{
+ return __diag288(func, timeout, virt_to_phys(cmd), len);
+}
+
+static int __diag288_lpar(unsigned int func, unsigned int timeout,
+ unsigned long action)
+{
+ return __diag288(func, timeout, action, 0);
+}
+
+static int wdt_start(struct watchdog_device *dev)
+{
+ char *ebc_cmd;
+ size_t len;
+ int ret;
+ unsigned int func;
+
+ ret = -ENODEV;
+
+ if (MACHINE_IS_VM) {
+ ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+ if (!ebc_cmd)
+ return -ENOMEM;
+ len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
+ ASCEBC(ebc_cmd, MAX_CMDLEN);
+ EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+ func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
+ : WDT_FUNC_INIT;
+ ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+ WARN_ON(ret != 0);
+ kfree(ebc_cmd);
+ }
+
+ if (MACHINE_IS_LPAR) {
+ ret = __diag288_lpar(WDT_FUNC_INIT,
+ dev->timeout, LPARWDT_RESTART);
+ }
+
+ if (ret) {
+ pr_err("The watchdog cannot be activated\n");
+ return ret;
+ }
+ pr_info("The watchdog was activated\n");
+ return 0;
+}
+
+static int wdt_stop(struct watchdog_device *dev)
+{
+ int ret;
+
+ ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
+ pr_info("The watchdog was deactivated\n");
+ return ret;
+}
+
+static int wdt_ping(struct watchdog_device *dev)
+{
+ char *ebc_cmd;
+ size_t len;
+ int ret;
+ unsigned int func;
+
+ ret = -ENODEV;
+
+ if (MACHINE_IS_VM) {
+ ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+ if (!ebc_cmd)
+ return -ENOMEM;
+ len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
+ ASCEBC(ebc_cmd, MAX_CMDLEN);
+ EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+ /*
+ * It seems to be ok for z/VM to use the init function to
+ * retrigger the watchdog. On LPAR, WDT_FUNC_CHANGE must
+ * be used when the watchdog is running.
+ */
+ func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
+ : WDT_FUNC_INIT;
+
+ ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+ WARN_ON(ret != 0);
+ kfree(ebc_cmd);
+ }
+
+ if (MACHINE_IS_LPAR)
+ ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
+
+ if (ret)
+ pr_err("The watchdog timer cannot be started or reset\n");
+ return ret;
+}
+
+static int wdt_set_timeout(struct watchdog_device *dev, unsigned int new_to)
+{
+ dev->timeout = new_to;
+ return wdt_ping(dev);
+}
+
+static struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .ping = wdt_ping,
+ .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_info wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = "z Watchdog",
+};
+
+static struct watchdog_device wdt_dev = {
+ .parent = NULL,
+ .info = &wdt_info,
+ .ops = &wdt_ops,
+ .bootstatus = 0,
+ .timeout = WDT_DEFAULT_TIMEOUT,
+ .min_timeout = MIN_INTERVAL,
+ .max_timeout = MAX_INTERVAL,
+};
+
+/*
+ * It makes no sense to go into suspend while the watchdog is running.
+ * Depending on the memory size, the watchdog might trigger while we
+ * are still saving the memory.
+ * We reuse the open flag to ensure that suspend and watchdog open are
+ * exclusive operations.
+ */
+static int wdt_suspend(void)
+{
+ if (test_and_set_bit(WDOG_DEV_OPEN, &wdt_dev.status)) {
+ pr_err("Linux cannot be suspended while the watchdog is in use\n");
+ return notifier_from_errno(-EBUSY);
+ }
+ if (test_bit(WDOG_ACTIVE, &wdt_dev.status)) {
+ clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
+ pr_err("Linux cannot be suspended while the watchdog is in use\n");
+ return notifier_from_errno(-EBUSY);
+ }
+ return NOTIFY_DONE;
+}
+
+static int wdt_resume(void)
+{
+ clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
+ return NOTIFY_DONE;
+}
+
+static int wdt_power_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ switch (event) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ return wdt_resume();
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ return wdt_suspend();
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block wdt_power_notifier = {
+ .notifier_call = wdt_power_event,
+};
+
+static int __init diag288_init(void)
+{
+ int ret;
+ char ebc_begin[] = {
+ 194, 197, 199, 201, 213
+ };
+
+ watchdog_set_nowayout(&wdt_dev, nowayout_info);
+
+ if (MACHINE_IS_VM) {
+ pr_info("The watchdog device driver detected a z/VM environment\n");
+ if (__diag288_vm(WDT_FUNC_INIT, 15,
+ ebc_begin, sizeof(ebc_begin)) != 0) {
+ pr_err("The watchdog cannot be initialized\n");
+ return -EINVAL;
+ }
+ } else if (MACHINE_IS_LPAR) {
+ pr_info("The watchdog device driver detected an LPAR environment\n");
+ if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
+ pr_err("The watchdog cannot be initialized\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("Linux runs in an environment that does not support the diag288 watchdog\n");
+ return -ENODEV;
+ }
+
+ if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
+ pr_err("The watchdog cannot be deactivated\n");
+ return -EINVAL;
+ }
+
+ ret = register_pm_notifier(&wdt_power_notifier);
+ if (ret)
+ return ret;
+
+ ret = watchdog_register_device(&wdt_dev);
+ if (ret)
+ unregister_pm_notifier(&wdt_power_notifier);
+
+ return ret;
+}
+
+static void __exit diag288_exit(void)
+{
+ watchdog_unregister_device(&wdt_dev);
+ unregister_pm_notifier(&wdt_power_notifier);
+}
+
+module_init(diag288_init);
+module_exit(diag288_exit);
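An illustrative load of the new module with its parameters spelled out (the
module name matches the Makefile hunk earlier in this merge; the CP command
string is an example and, per the header comment, is only honoured under z/VM):

	modprobe diag288_wdt cmd="SYSTEM RESTART" conceal=1 nowayout=1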
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index dd51d9539b33..9d4874f09948 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -21,19 +21,17 @@
* Halt on suspend: Manual Can be automatic
*/
+#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
-#include <linux/watchdog.h>
-#include <linux/clk.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
+#include <linux/regmap.h>
#include <linux/timer.h>
-#include <linux/jiffies.h>
+#include <linux/watchdog.h>
#define DRIVER_NAME "imx2-wdt"
@@ -55,19 +53,12 @@
#define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
-#define IMX2_WDT_STATUS_OPEN 0
-#define IMX2_WDT_STATUS_STARTED 1
-#define IMX2_WDT_EXPECT_CLOSE 2
-
-static struct {
+struct imx2_wdt_device {
struct clk *clk;
- void __iomem *base;
- unsigned timeout;
- unsigned long status;
+ struct regmap *regmap;
struct timer_list timer; /* Pings the watchdog when closed */
-} imx2_wdt;
-
-static struct miscdevice imx2_wdt_miscdev;
+ struct watchdog_device wdog;
+};
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
@@ -85,9 +76,12 @@ static const struct watchdog_info imx2_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
};
-static inline void imx2_wdt_setup(void)
+static inline void imx2_wdt_setup(struct watchdog_device *wdog)
{
- u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+ u32 val;
+
+ regmap_read(wdev->regmap, IMX2_WDT_WCR, &val);
/* Suspend timer in low power mode, write once-only */
val |= IMX2_WDT_WCR_WDZST;
@@ -98,227 +92,199 @@ static inline void imx2_wdt_setup(void)
/* Keep Watchdog Disabled */
val &= ~IMX2_WDT_WCR_WDE;
/* Set the watchdog's Time-Out value */
- val |= WDOG_SEC_TO_COUNT(imx2_wdt.timeout);
+ val |= WDOG_SEC_TO_COUNT(wdog->timeout);
- __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+ regmap_write(wdev->regmap, IMX2_WDT_WCR, val);
/* enable the watchdog */
val |= IMX2_WDT_WCR_WDE;
- __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+ regmap_write(wdev->regmap, IMX2_WDT_WCR, val);
}
-static inline void imx2_wdt_ping(void)
+static inline bool imx2_wdt_is_running(struct imx2_wdt_device *wdev)
{
- __raw_writew(IMX2_WDT_SEQ1, imx2_wdt.base + IMX2_WDT_WSR);
- __raw_writew(IMX2_WDT_SEQ2, imx2_wdt.base + IMX2_WDT_WSR);
-}
+ u32 val;
-static void imx2_wdt_timer_ping(unsigned long arg)
-{
- /* ping it every imx2_wdt.timeout / 2 seconds to prevent reboot */
- imx2_wdt_ping();
- mod_timer(&imx2_wdt.timer, jiffies + imx2_wdt.timeout * HZ / 2);
+ regmap_read(wdev->regmap, IMX2_WDT_WCR, &val);
+
+ return val & IMX2_WDT_WCR_WDE;
}
-static void imx2_wdt_start(void)
+static int imx2_wdt_ping(struct watchdog_device *wdog)
{
- if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
- /* at our first start we enable clock and do initialisations */
- clk_prepare_enable(imx2_wdt.clk);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- imx2_wdt_setup();
- } else /* delete the timer that pings the watchdog after close */
- del_timer_sync(&imx2_wdt.timer);
-
- /* Watchdog is enabled - time to reload the timeout value */
- imx2_wdt_ping();
+ regmap_write(wdev->regmap, IMX2_WDT_WSR, IMX2_WDT_SEQ1);
+ regmap_write(wdev->regmap, IMX2_WDT_WSR, IMX2_WDT_SEQ2);
+ return 0;
}
-static void imx2_wdt_stop(void)
+static void imx2_wdt_timer_ping(unsigned long arg)
{
- /* we don't need a clk_disable, it cannot be disabled once started.
- * We use a timer to ping the watchdog while /dev/watchdog is closed */
- imx2_wdt_timer_ping(0);
+ struct watchdog_device *wdog = (struct watchdog_device *)arg;
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ /* ping it every wdog->timeout / 2 seconds to prevent reboot */
+ imx2_wdt_ping(wdog);
+ mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
}
-static void imx2_wdt_set_timeout(int new_timeout)
+static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int new_timeout)
{
- u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- /* set the new timeout value in the WSR */
- val &= ~IMX2_WDT_WCR_WT;
- val |= WDOG_SEC_TO_COUNT(new_timeout);
- __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+ regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT,
+ WDOG_SEC_TO_COUNT(new_timeout));
+ return 0;
}
-static int imx2_wdt_open(struct inode *inode, struct file *file)
+static int imx2_wdt_start(struct watchdog_device *wdog)
{
- if (test_and_set_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status))
- return -EBUSY;
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ if (imx2_wdt_is_running(wdev)) {
+ /* delete the timer that pings the watchdog after close */
+ del_timer_sync(&wdev->timer);
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
+ } else
+ imx2_wdt_setup(wdog);
- imx2_wdt_start();
- return nonseekable_open(inode, file);
+ return imx2_wdt_ping(wdog);
}
-static int imx2_wdt_close(struct inode *inode, struct file *file)
+static int imx2_wdt_stop(struct watchdog_device *wdog)
{
- if (test_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status) && !nowayout)
- imx2_wdt_stop();
- else {
- dev_crit(imx2_wdt_miscdev.parent,
- "Unexpected close: Expect reboot!\n");
- imx2_wdt_ping();
- }
-
- clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
- clear_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status);
+ /*
+ * We don't need a clk_disable, it cannot be disabled once started.
+ * We use a timer to ping the watchdog while /dev/watchdog is closed
+ */
+ imx2_wdt_timer_ping((unsigned long)wdog);
return 0;
}
-static long imx2_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static inline void imx2_wdt_ping_if_active(struct watchdog_device *wdog)
{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value;
- u16 val;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &imx2_wdt_info,
- sizeof(struct watchdog_info)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- return put_user(0, p);
-
- case WDIOC_GETBOOTSTATUS:
- val = __raw_readw(imx2_wdt.base + IMX2_WDT_WRSR);
- new_value = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
- return put_user(new_value, p);
-
- case WDIOC_KEEPALIVE:
- imx2_wdt_ping();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
- if ((new_value < 1) || (new_value > IMX2_WDT_MAX_TIME))
- return -EINVAL;
- imx2_wdt_set_timeout(new_value);
- imx2_wdt.timeout = new_value;
- imx2_wdt_ping();
-
- /* Fallthrough to return current value */
- case WDIOC_GETTIMEOUT:
- return put_user(imx2_wdt.timeout, p);
-
- default:
- return -ENOTTY;
- }
-}
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
-static ssize_t imx2_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- size_t i;
- char c;
-
- if (len == 0) /* Can we see this even ? */
- return 0;
-
- clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
- /* scan to see whether or not we got the magic character */
- for (i = 0; i != len; i++) {
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+ if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
+ imx2_wdt_timer_ping((unsigned long)wdog);
}
-
- imx2_wdt_ping();
- return len;
}
-static const struct file_operations imx2_wdt_fops = {
+static struct watchdog_ops imx2_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = imx2_wdt_ioctl,
- .open = imx2_wdt_open,
- .release = imx2_wdt_close,
- .write = imx2_wdt_write,
+ .start = imx2_wdt_start,
+ .stop = imx2_wdt_stop,
+ .ping = imx2_wdt_ping,
+ .set_timeout = imx2_wdt_set_timeout,
};
-static struct miscdevice imx2_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &imx2_wdt_fops,
+static struct regmap_config imx2_wdt_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x8,
};
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
- int ret;
+ struct imx2_wdt_device *wdev;
+ struct watchdog_device *wdog;
struct resource *res;
+ void __iomem *base;
+ int ret;
+ u32 val;
+
+ wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(imx2_wdt.base))
- return PTR_ERR(imx2_wdt.base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ wdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ &imx2_wdt_regmap_config);
+ if (IS_ERR(wdev->regmap)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ return PTR_ERR(wdev->regmap);
+ }
- imx2_wdt.clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(imx2_wdt.clk)) {
+ wdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(wdev->clk)) {
dev_err(&pdev->dev, "can't get Watchdog clock\n");
- return PTR_ERR(imx2_wdt.clk);
+ return PTR_ERR(wdev->clk);
}
- imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
- if (imx2_wdt.timeout != timeout)
- dev_warn(&pdev->dev, "Initial timeout out of range! "
- "Clamped from %u to %u\n", timeout, imx2_wdt.timeout);
+ wdog = &wdev->wdog;
+ wdog->info = &imx2_wdt_info;
+ wdog->ops = &imx2_wdt_ops;
+ wdog->min_timeout = 1;
+ wdog->max_timeout = IMX2_WDT_MAX_TIME;
- setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);
+ clk_prepare_enable(wdev->clk);
- imx2_wdt_miscdev.parent = &pdev->dev;
- ret = misc_register(&imx2_wdt_miscdev);
- if (ret)
- goto fail;
+ regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
+ wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
- dev_info(&pdev->dev,
- "IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
- imx2_wdt.timeout, nowayout);
- return 0;
+ wdog->timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
+ if (wdog->timeout != timeout)
+ dev_warn(&pdev->dev, "Initial timeout out of range! Clamped from %u to %u\n",
+ timeout, wdog->timeout);
+
+ platform_set_drvdata(pdev, wdog);
+ watchdog_set_drvdata(wdog, wdev);
+ watchdog_set_nowayout(wdog, nowayout);
+ watchdog_init_timeout(wdog, timeout, &pdev->dev);
+
+ setup_timer(&wdev->timer, imx2_wdt_timer_ping, (unsigned long)wdog);
+
+ imx2_wdt_ping_if_active(wdog);
-fail:
- imx2_wdt_miscdev.parent = NULL;
- return ret;
+ ret = watchdog_register_device(wdog);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register watchdog device\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n",
+ wdog->timeout, nowayout);
+
+ return 0;
}
static int __exit imx2_wdt_remove(struct platform_device *pdev)
{
- misc_deregister(&imx2_wdt_miscdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
- del_timer_sync(&imx2_wdt.timer);
+ watchdog_unregister_device(wdog);
- dev_crit(imx2_wdt_miscdev.parent,
- "Device removed: Expect reboot!\n");
+ if (imx2_wdt_is_running(wdev)) {
+ del_timer_sync(&wdev->timer);
+ imx2_wdt_ping(wdog);
+ dev_crit(&pdev->dev, "Device removed: Expect reboot!\n");
}
-
- imx2_wdt_miscdev.parent = NULL;
return 0;
}
static void imx2_wdt_shutdown(struct platform_device *pdev)
{
- if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
- /* we are running, we need to delete the timer but will give
- * max timeout before reboot will take place */
- del_timer_sync(&imx2_wdt.timer);
- imx2_wdt_set_timeout(IMX2_WDT_MAX_TIME);
- imx2_wdt_ping();
-
- dev_crit(imx2_wdt_miscdev.parent,
- "Device shutdown: Expect reboot!\n");
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ if (imx2_wdt_is_running(wdev)) {
+ /*
+ * We are running, we need to delete the timer but will
+ * give max timeout before reboot will take place
+ */
+ del_timer_sync(&wdev->timer);
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
+ dev_crit(&pdev->dev, "Device shutdown: Expect reboot!\n");
}
}
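As a worked example of the timeout encoding used throughout this conversion:
WDOG_SEC_TO_COUNT(30) expands to (30 * 2 - 1) << 8 = 59 << 8 = 0x3B00, i.e.
the count occupies the field masked by IMX2_WDT_WCR_WT, which lets the new
set_timeout path rewrite just that field:

	/* Sketch: update only the timeout field, leaving WDE etc. untouched. */
	regmap_update_bits(wdev->regmap, IMX2_WDT_WCR,
			   IMX2_WDT_WCR_WT,		/* mask: timeout bits */
			   WDOG_SEC_TO_COUNT(30));	/* 0x3B00 for 30 s */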
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
new file mode 100644
index 000000000000..ca66e8e74635
--- /dev/null
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -0,0 +1,184 @@
+/*
+ * intel-mid_wdt: generic Intel MID SCU watchdog driver
+ *
+ * Platforms supported so far:
+ * - Merrifield only
+ *
+ * Copyright (C) 2014 Intel Corporation. All rights reserved.
+ * Contact: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+
+#define IPC_WATCHDOG 0xf8
+
+#define MID_WDT_PRETIMEOUT 15
+#define MID_WDT_TIMEOUT_MIN (1 + MID_WDT_PRETIMEOUT)
+#define MID_WDT_TIMEOUT_MAX 170
+#define MID_WDT_DEFAULT_TIMEOUT 90
+
+/* SCU watchdog messages */
+enum {
+ SCU_WATCHDOG_START = 0,
+ SCU_WATCHDOG_STOP,
+ SCU_WATCHDOG_KEEPALIVE,
+};
+
+static inline int wdt_command(int sub, u32 *in, int inlen)
+{
+ return intel_scu_ipc_command(IPC_WATCHDOG, sub, in, inlen, NULL, 0);
+}
+
+static int wdt_start(struct watchdog_device *wd)
+{
+ int ret, in_size;
+ int timeout = wd->timeout;
+ struct ipc_wd_start {
+ u32 pretimeout;
+ u32 timeout;
+ } ipc_wd_start = { timeout - MID_WDT_PRETIMEOUT, timeout };
+
+ /*
+ * SCU expects the input size for the watchdog IPC to
+ * be given in dwords (4-byte units)
+ */
+ in_size = DIV_ROUND_UP(sizeof(ipc_wd_start), 4);
+
+ ret = wdt_command(SCU_WATCHDOG_START, (u32 *)&ipc_wd_start, in_size);
+ if (ret) {
+ struct device *dev = watchdog_get_drvdata(wd);
+ dev_crit(dev, "error starting watchdog: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int wdt_ping(struct watchdog_device *wd)
+{
+ int ret;
+
+ ret = wdt_command(SCU_WATCHDOG_KEEPALIVE, NULL, 0);
+ if (ret) {
+ struct device *dev = watchdog_get_drvdata(wd);
+ dev_crit(dev, "Error executing keepalive: 0x%x\n", ret);
+ }
+
+ return ret;
+}
+
+static int wdt_stop(struct watchdog_device *wd)
+{
+ int ret;
+
+ ret = wdt_command(SCU_WATCHDOG_STOP, NULL, 0);
+ if (ret) {
+ struct device *dev = watchdog_get_drvdata(wd);
+ dev_crit(dev, "Error stopping watchdog: 0x%x\n", ret);
+ }
+
+ return ret;
+}
+
+static irqreturn_t mid_wdt_irq(int irq, void *dev_id)
+{
+ panic("Kernel Watchdog");
+
+ /* This code should not be reached */
+ return IRQ_HANDLED;
+}
+
+static const struct watchdog_info mid_wdt_info = {
+ .identity = "Intel MID SCU watchdog",
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+};
+
+static const struct watchdog_ops mid_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .ping = wdt_ping,
+};
+
+static int mid_wdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdt_dev;
+ struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdata->probe) {
+ ret = pdata->probe(pdev);
+ if (ret)
+ return ret;
+ }
+
+ wdt_dev = devm_kzalloc(&pdev->dev, sizeof(*wdt_dev), GFP_KERNEL);
+ if (!wdt_dev)
+ return -ENOMEM;
+
+ wdt_dev->info = &mid_wdt_info;
+ wdt_dev->ops = &mid_wdt_ops;
+ wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
+ wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
+ wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(wdt_dev, &pdev->dev);
+ platform_set_drvdata(pdev, wdt_dev);
+
+ ret = devm_request_irq(&pdev->dev, pdata->irq, mid_wdt_irq,
+ IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
+ wdt_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "error requesting warning irq %d\n",
+ pdata->irq);
+ return ret;
+ }
+
+ ret = watchdog_register_device(wdt_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "error registering watchdog device\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Intel MID watchdog device probed\n");
+
+ return 0;
+}
+
+static int mid_wdt_remove(struct platform_device *pdev)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+ watchdog_unregister_device(wd);
+ return 0;
+}
+
+static struct platform_driver mid_wdt_driver = {
+ .probe = mid_wdt_probe,
+ .remove = mid_wdt_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "intel_mid_wdt",
+ },
+};
+
+module_platform_driver(mid_wdt_driver);
+
+MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
+MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
+MODULE_LICENSE("GPL");
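For reference, the in_size computation in wdt_start() works out as follows:
struct ipc_wd_start holds two u32 fields (8 bytes), so
DIV_ROUND_UP(8, 4) = 2 dwords is what gets passed to intel_scu_ipc_command().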
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 20dc73844737..d9c1a1601926 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -162,7 +162,7 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
kempld_get_mutex(pld);
stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
- stage_cfg |= STAGE_CFG_SET_PRESCALER(prescaler);
+ stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
stage_timeout);
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 57ccae8327ff..1e6e28df5d7b 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -225,7 +225,7 @@ static int xwdt_remove(struct platform_device *pdev)
}
/* Match table for of_platform binding */
-static struct of_device_id xwdt_of_match[] = {
+static const struct of_device_id xwdt_of_match[] = {
{ .compatible = "xlnx,xps-timebase-wdt-1.00.a", },
{ .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
{},
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 9b3c41d18703..00d0741228fc 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -55,15 +55,19 @@ struct orion_watchdog_data {
int wdt_counter_offset;
int wdt_enable_bit;
int rstout_enable_bit;
+ int rstout_mask_bit;
int (*clock_init)(struct platform_device *,
struct orion_watchdog *);
+ int (*enabled)(struct orion_watchdog *);
int (*start)(struct watchdog_device *);
+ int (*stop)(struct watchdog_device *);
};
struct orion_watchdog {
struct watchdog_device wdt;
void __iomem *reg;
void __iomem *rstout;
+ void __iomem *rstout_mask;
unsigned long clk_rate;
struct clk *clk;
const struct orion_watchdog_data *data;
@@ -142,9 +146,35 @@ static int orion_wdt_ping(struct watchdog_device *wdt_dev)
return 0;
}
+static int armada375_start(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+ u32 reg;
+
+ /* Set watchdog duration */
+ writel(dev->clk_rate * wdt_dev->timeout,
+ dev->reg + dev->data->wdt_counter_offset);
+
+ /* Clear the watchdog expiration bit */
+ atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
+
+ /* Enable watchdog timer */
+ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
+ dev->data->wdt_enable_bit);
+
+ /* Enable reset on watchdog */
+ reg = readl(dev->rstout);
+ reg |= dev->data->rstout_enable_bit;
+ writel(reg, dev->rstout);
+
+ atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit, 0);
+ return 0;
+}
+
static int armada370_start(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+ u32 reg;
/* Set watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
@@ -157,8 +187,10 @@ static int armada370_start(struct watchdog_device *wdt_dev)
atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
dev->data->wdt_enable_bit);
- atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
- dev->data->rstout_enable_bit);
+ /* Enable reset on watchdog */
+ reg = readl(dev->rstout);
+ reg |= dev->data->rstout_enable_bit;
+ writel(reg, dev->rstout);
return 0;
}
@@ -189,7 +221,7 @@ static int orion_wdt_start(struct watchdog_device *wdt_dev)
return dev->data->start(wdt_dev);
}
-static int orion_wdt_stop(struct watchdog_device *wdt_dev)
+static int orion_stop(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
@@ -202,7 +234,48 @@ static int orion_wdt_stop(struct watchdog_device *wdt_dev)
return 0;
}
-static int orion_wdt_enabled(struct orion_watchdog *dev)
+static int armada375_stop(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+ u32 reg;
+
+ /* Disable reset on watchdog */
+ atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit,
+ dev->data->rstout_mask_bit);
+ reg = readl(dev->rstout);
+ reg &= ~dev->data->rstout_enable_bit;
+ writel(reg, dev->rstout);
+
+ /* Disable watchdog timer */
+ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
+
+ return 0;
+}
+
+static int armada370_stop(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+ u32 reg;
+
+ /* Disable reset on watchdog */
+ reg = readl(dev->rstout);
+ reg &= ~dev->data->rstout_enable_bit;
+ writel(reg, dev->rstout);
+
+ /* Disable watchdog timer */
+ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
+
+ return 0;
+}
+
+static int orion_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+
+ return dev->data->stop(wdt_dev);
+}
+
+static int orion_enabled(struct orion_watchdog *dev)
{
bool enabled, running;
@@ -212,6 +285,24 @@ static int orion_wdt_enabled(struct orion_watchdog *dev)
return enabled && running;
}
+static int armada375_enabled(struct orion_watchdog *dev)
+{
+ bool masked, enabled, running;
+
+ masked = readl(dev->rstout_mask) & dev->data->rstout_mask_bit;
+ enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
+ running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
+
+ return !masked && enabled && running;
+}
+
+static int orion_wdt_enabled(struct watchdog_device *wdt_dev)
+{
+ struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
+
+ return dev->data->enabled(dev);
+}
+
static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
@@ -262,10 +353,6 @@ static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev,
return devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- /* This workaround works only for "orion-wdt", DT-enabled */
- if (!of_device_is_compatible(pdev->dev.of_node, "marvell,orion-wdt"))
- return NULL;
-
rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET;
WARN(1, FW_BUG "falling back to harcoded RSTOUT reg %pa\n", &rstout);
@@ -277,7 +364,9 @@ static const struct orion_watchdog_data orion_data = {
.wdt_enable_bit = BIT(4),
.wdt_counter_offset = 0x24,
.clock_init = orion_wdt_clock_init,
+ .enabled = orion_enabled,
.start = orion_start,
+ .stop = orion_stop,
};
static const struct orion_watchdog_data armada370_data = {
@@ -285,7 +374,9 @@ static const struct orion_watchdog_data armada370_data = {
.wdt_enable_bit = BIT(8),
.wdt_counter_offset = 0x34,
.clock_init = armada370_wdt_clock_init,
+ .enabled = orion_enabled,
.start = armada370_start,
+ .stop = armada370_stop,
};
static const struct orion_watchdog_data armadaxp_data = {
@@ -293,7 +384,31 @@ static const struct orion_watchdog_data armadaxp_data = {
.wdt_enable_bit = BIT(8),
.wdt_counter_offset = 0x34,
.clock_init = armadaxp_wdt_clock_init,
+ .enabled = orion_enabled,
.start = armada370_start,
+ .stop = armada370_stop,
+};
+
+static const struct orion_watchdog_data armada375_data = {
+ .rstout_enable_bit = BIT(8),
+ .rstout_mask_bit = BIT(10),
+ .wdt_enable_bit = BIT(8),
+ .wdt_counter_offset = 0x34,
+ .clock_init = armada370_wdt_clock_init,
+ .enabled = armada375_enabled,
+ .start = armada375_start,
+ .stop = armada375_stop,
+};
+
+static const struct orion_watchdog_data armada380_data = {
+ .rstout_enable_bit = BIT(8),
+ .rstout_mask_bit = BIT(10),
+ .wdt_enable_bit = BIT(8),
+ .wdt_counter_offset = 0x34,
+ .clock_init = armadaxp_wdt_clock_init,
+ .enabled = armada375_enabled,
+ .start = armada375_start,
+ .stop = armada375_stop,
};
static const struct of_device_id orion_wdt_of_match_table[] = {
@@ -309,16 +424,78 @@ static const struct of_device_id orion_wdt_of_match_table[] = {
.compatible = "marvell,armada-xp-wdt",
.data = &armadaxp_data,
},
+ {
+ .compatible = "marvell,armada-375-wdt",
+ .data = &armada375_data,
+ },
+ {
+ .compatible = "marvell,armada-380-wdt",
+ .data = &armada380_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
+static int orion_wdt_get_regs(struct platform_device *pdev,
+ struct orion_watchdog *dev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ dev->reg = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
+
+ /* Each supported compatible has some RSTOUT register quirk */
+ if (of_device_is_compatible(node, "marvell,orion-wdt")) {
+
+ dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start &
+ INTERNAL_REGS_MASK);
+ if (!dev->rstout)
+ return -ENODEV;
+
+ } else if (of_device_is_compatible(node, "marvell,armada-370-wdt") ||
+ of_device_is_compatible(node, "marvell,armada-xp-wdt")) {
+
+ /* Dedicated RSTOUT register, can be requested. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dev->rstout = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->rstout))
+ return PTR_ERR(dev->rstout);
+
+ } else if (of_device_is_compatible(node, "marvell,armada-375-wdt") ||
+ of_device_is_compatible(node, "marvell,armada-380-wdt")) {
+
+ /* Dedicated RSTOUT register, can be requested. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dev->rstout = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->rstout))
+ return PTR_ERR(dev->rstout);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res)
+ return -ENODEV;
+ dev->rstout_mask = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dev->rstout_mask)
+ return -ENOMEM;
+
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int orion_wdt_probe(struct platform_device *pdev)
{
struct orion_watchdog *dev;
const struct of_device_id *match;
unsigned int wdt_max_duration; /* (seconds) */
- struct resource *res;
int ret, irq;
dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog),
@@ -336,19 +513,9 @@ static int orion_wdt_probe(struct platform_device *pdev)
dev->wdt.min_timeout = 1;
dev->data = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- dev->reg = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!dev->reg)
- return -ENOMEM;
-
- dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start &
- INTERNAL_REGS_MASK);
- if (!dev->rstout)
- return -ENODEV;
+ ret = orion_wdt_get_regs(pdev, dev);
+ if (ret)
+ return ret;
ret = dev->data->clock_init(pdev, dev);
if (ret) {
@@ -371,7 +538,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
* removed and re-inserted, or if the bootloader explicitly
* set a running watchdog before booting the kernel.
*/
- if (!orion_wdt_enabled(dev))
+ if (!orion_wdt_enabled(&dev->wdt))
orion_wdt_stop(&dev->wdt);
/* Request the IRQ only after the watchdog is disabled */
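The refactoring above turns start/stop/enabled into per-variant hooks selected
via of_device_id::data, so supporting a further SoC becomes a data-only change
(plus a compatible entry and an orion_wdt_get_regs() branch). An illustrative,
entirely hypothetical entry following the Armada 375/380 pattern:

	static const struct orion_watchdog_data my_soc_data = {
		.rstout_enable_bit	= BIT(8),
		.rstout_mask_bit	= BIT(10),
		.wdt_enable_bit		= BIT(8),
		.wdt_counter_offset	= 0x34,
		.clock_init		= armada370_wdt_clock_init,
		.enabled		= armada375_enabled,
		.start			= armada375_start,
		.stop			= armada375_stop,
	};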
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index d04d02b41c32..061756e36cf8 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -282,8 +282,6 @@ static int sh_wdt_probe(struct platform_device *pdev)
wdt->timer.data = (unsigned long)wdt;
wdt->timer.expires = next_ping_period(clock_division_ratio);
- platform_set_drvdata(pdev, wdt);
-
dev_info(&pdev->dev, "initialized.\n");
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 47629d268e0a..c1b03f4235b9 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -59,7 +59,6 @@
* @adev: amba device structure of wdt
* @status: current status of wdt
* @load_val: load value to be set for current timeout
- * @timeout: current programmed timeout
*/
struct sp805_wdt {
struct watchdog_device wdd;
@@ -68,7 +67,6 @@ struct sp805_wdt {
struct clk *clk;
struct amba_device *adev;
unsigned int load_val;
- unsigned int timeout;
};
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -98,7 +96,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
spin_lock(&wdt->lock);
wdt->load_val = load;
/* roundup timeout to closest positive integer value */
- wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
+ wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
spin_unlock(&wdt->lock);
return 0;
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index cd00a7836cdc..693b9d2c6e39 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -57,17 +57,17 @@ struct sunxi_wdt_dev {
*/
static const int wdt_timeout_map[] = {
- [1] = 0b0001, /* 1s */
- [2] = 0b0010, /* 2s */
- [3] = 0b0011, /* 3s */
- [4] = 0b0100, /* 4s */
- [5] = 0b0101, /* 5s */
- [6] = 0b0110, /* 6s */
- [8] = 0b0111, /* 8s */
- [10] = 0b1000, /* 10s */
- [12] = 0b1001, /* 12s */
- [14] = 0b1010, /* 14s */
- [16] = 0b1011, /* 16s */
+ [1] = 0x1, /* 1s */
+ [2] = 0x2, /* 2s */
+ [3] = 0x3, /* 3s */
+ [4] = 0x4, /* 4s */
+ [5] = 0x5, /* 5s */
+ [6] = 0x6, /* 6s */
+ [8] = 0x7, /* 8s */
+ [10] = 0x8, /* 10s */
+ [12] = 0x9, /* 12s */
+ [14] = 0xA, /* 14s */
+ [16] = 0xB, /* 16s */
};
static int sunxi_wdt_ping(struct watchdog_device *wdt_dev)
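Presumably the motivation here: 0b... binary constants are a GCC extension, so
spelling the register values in hex keeps the table portable across compilers;
the encoded values are unchanged (0b0111 == 0x7 for the 8 s entry, and so on).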
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index d2cd9f0bcb9a..56369c4f1961 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -232,7 +232,7 @@ err_out_disable_device:
static void wdt_remove(struct pci_dev *pdev)
{
watchdog_unregister_device(&wdt_dev);
- del_timer(&timer);
+ del_timer_sync(&timer);
iounmap(wdt_mem);
release_mem_region(mmio, VIA_WDT_MMIO_LEN);
release_resource(&wdt_res);
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index b1da0c18fd1a..7165704a3e33 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -64,6 +64,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static int early_disable;
+module_param(early_disable, int, 0);
+MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
+
/*
* Kernel methods.
*/
@@ -208,9 +212,14 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
t = superio_inb(cr_wdt_timeout);
if (t != 0) {
- pr_info("Watchdog already running. Resetting timeout to %d sec\n",
- wdog->timeout);
- superio_outb(cr_wdt_timeout, wdog->timeout);
+ if (early_disable) {
+ pr_warn("Stopping previously enabled watchdog until userland kicks in\n");
+ superio_outb(cr_wdt_timeout, 0);
+ } else {
+ pr_info("Watchdog already running. Resetting timeout to %d sec\n",
+ wdog->timeout);
+ superio_outb(cr_wdt_timeout, wdog->timeout);
+ }
}
/* set second mode & disable keyboard turning off watchdog */
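An illustrative use of the new parameter (the module name w83627hf_wdt matches
the Makefile entry earlier in this merge):

	modprobe w83627hf_wdt early_disable=1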
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
deleted file mode 100644
index e9ea856b8ff2..000000000000
--- a/drivers/watchdog/w83697hf_wdt.c
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * w83697hf/hg WDT driver
- *
- * (c) Copyright 2006 Samuel Tardieu <sam@rfc1149.net>
- * (c) Copyright 2006 Marcus Junker <junker@anduras.de>
- *
- * Based on w83627hf_wdt.c which is based on advantechwdt.c
- * which is based on wdt.c.
- * Original copyright messages:
- *
- * (c) Copyright 2003 Pádraig Brady <P@draigBrady.com>
- *
- * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
- *
- * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Neither Marcus Junker nor ANDURAS AG admit liability nor provide
- * warranty for any of this software. This material is provided
- * "AS-IS" and at no charge.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/ioport.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-
-#define WATCHDOG_NAME "w83697hf/hg WDT"
-#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
-#define WATCHDOG_EARLY_DISABLE 1 /* Disable until userland kicks in */
-
-static unsigned long wdt_is_open;
-static char expect_close;
-static DEFINE_SPINLOCK(io_lock);
-
-/* You must set this - there is no sane way to probe for this board. */
-static int wdt_io = 0x2e;
-module_param(wdt_io, int, 0);
-MODULE_PARM_DESC(wdt_io,
- "w83697hf/hg WDT io port (default 0x2e, 0 = autodetect)");
-
-static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
-module_param(timeout, int, 0);
-MODULE_PARM_DESC(timeout,
- "Watchdog timeout in seconds. 1<= timeout <=255 (default="
- __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-static int early_disable = WATCHDOG_EARLY_DISABLE;
-module_param(early_disable, int, 0);
-MODULE_PARM_DESC(early_disable,
- "Watchdog gets disabled at boot time (default="
- __MODULE_STRING(WATCHDOG_EARLY_DISABLE) ")");
-
-/*
- * Kernel methods.
- */
-
-#define W83697HF_EFER (wdt_io + 0) /* Extended Function Enable Register */
-#define W83697HF_EFIR (wdt_io + 0) /* Extended Function Index Register
- (same as EFER) */
-#define W83697HF_EFDR (wdt_io + 1) /* Extended Function Data Register */
-
-static inline void w83697hf_unlock(void)
-{
- outb_p(0x87, W83697HF_EFER); /* Enter extended function mode */
- outb_p(0x87, W83697HF_EFER); /* Again according to manual */
-}
-
-static inline void w83697hf_lock(void)
-{
- outb_p(0xAA, W83697HF_EFER); /* Leave extended function mode */
-}
-
-/*
- * The three functions w83697hf_get_reg(), w83697hf_set_reg() and
- * w83697hf_write_timeout() must be called with the device unlocked.
- */
-
-static unsigned char w83697hf_get_reg(unsigned char reg)
-{
- outb_p(reg, W83697HF_EFIR);
- return inb_p(W83697HF_EFDR);
-}
-
-static void w83697hf_set_reg(unsigned char reg, unsigned char data)
-{
- outb_p(reg, W83697HF_EFIR);
- outb_p(data, W83697HF_EFDR);
-}
-
-static void w83697hf_write_timeout(int timeout)
-{
- /* Write Timeout counter to CRF4 */
- w83697hf_set_reg(0xF4, timeout);
-}
-
-static void w83697hf_select_wdt(void)
-{
- w83697hf_unlock();
- w83697hf_set_reg(0x07, 0x08); /* Switch to logic device 8 (GPIO2) */
-}
-
-static inline void w83697hf_deselect_wdt(void)
-{
- w83697hf_lock();
-}
-
-static void w83697hf_init(void)
-{
- unsigned char bbuf;
-
- w83697hf_select_wdt();
-
- bbuf = w83697hf_get_reg(0x29);
- bbuf &= ~0x60;
- bbuf |= 0x20;
-
- /* Set pin 119 to WDTO# mode (= CR29, WDT0) */
- w83697hf_set_reg(0x29, bbuf);
-
- bbuf = w83697hf_get_reg(0xF3);
- bbuf &= ~0x04;
- w83697hf_set_reg(0xF3, bbuf); /* Count mode is seconds */
-
- w83697hf_deselect_wdt();
-}
-
-static void wdt_ping(void)
-{
- spin_lock(&io_lock);
- w83697hf_select_wdt();
-
- w83697hf_write_timeout(timeout);
-
- w83697hf_deselect_wdt();
- spin_unlock(&io_lock);
-}
-
-static void wdt_enable(void)
-{
- spin_lock(&io_lock);
- w83697hf_select_wdt();
-
- w83697hf_write_timeout(timeout);
- w83697hf_set_reg(0x30, 1); /* Enable timer */
-
- w83697hf_deselect_wdt();
- spin_unlock(&io_lock);
-}
-
-static void wdt_disable(void)
-{
- spin_lock(&io_lock);
- w83697hf_select_wdt();
-
- w83697hf_set_reg(0x30, 0); /* Disable timer */
- w83697hf_write_timeout(0);
-
- w83697hf_deselect_wdt();
- spin_unlock(&io_lock);
-}
-
-static unsigned char wdt_running(void)
-{
- unsigned char t;
-
- spin_lock(&io_lock);
- w83697hf_select_wdt();
-
- t = w83697hf_get_reg(0xF4); /* Read timer */
-
- w83697hf_deselect_wdt();
- spin_unlock(&io_lock);
-
- return t;
-}
-
-static int wdt_set_heartbeat(int t)
-{
- if (t < 1 || t > 255)
- return -EINVAL;
-
- timeout = t;
- return 0;
-}
-
-static ssize_t wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if (count) {
- if (!nowayout) {
- size_t i;
-
- expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- wdt_ping();
- }
- return count;
-}
-
-static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_timeout;
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
- | WDIOF_MAGICCLOSE,
- .firmware_version = 1,
- .identity = "W83697HF WDT",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user(argp, &ident, sizeof(ident)))
- return -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- {
- int options, retval = -EINVAL;
-
- if (get_user(options, p))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD) {
- wdt_disable();
- retval = 0;
- }
-
- if (options & WDIOS_ENABLECARD) {
- wdt_enable();
- retval = 0;
- }
-
- return retval;
- }
-
- case WDIOC_KEEPALIVE:
- wdt_ping();
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_timeout, p))
- return -EFAULT;
- if (wdt_set_heartbeat(new_timeout))
- return -EINVAL;
- wdt_ping();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- return put_user(timeout, p);
-
- default:
- return -ENOTTY;
- }
- return 0;
-}
-
-static int wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &wdt_is_open))
- return -EBUSY;
- /*
- * Activate
- */
-
- wdt_enable();
- return nonseekable_open(inode, file);
-}
-
-static int wdt_close(struct inode *inode, struct file *file)
-{
- if (expect_close == 42)
- wdt_disable();
- else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- wdt_ping();
- }
- expect_close = 0;
- clear_bit(0, &wdt_is_open);
- return 0;
-}
-
-/*
- * Notifier for system down
- */
-
-static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
- void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- wdt_disable(); /* Turn the WDT off */
-
- return NOTIFY_DONE;
-}
-
-/*
- * Kernel Interfaces
- */
-
-static const struct file_operations wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = wdt_write,
- .unlocked_ioctl = wdt_ioctl,
- .open = wdt_open,
- .release = wdt_close,
-};
-
-static struct miscdevice wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &wdt_fops,
-};
-
-/*
- * The WDT needs to learn about soft shutdowns in order to
- * turn the timebomb registers off.
- */
-
-static struct notifier_block wdt_notifier = {
- .notifier_call = wdt_notify_sys,
-};
-
-static int w83697hf_check_wdt(void)
-{
- if (!request_region(wdt_io, 2, WATCHDOG_NAME)) {
- pr_err("I/O address 0x%x already in use\n", wdt_io);
- return -EIO;
- }
-
- pr_debug("Looking for watchdog at address 0x%x\n", wdt_io);
- w83697hf_unlock();
- if (w83697hf_get_reg(0x20) == 0x60) {
- pr_info("watchdog found at address 0x%x\n", wdt_io);
- w83697hf_lock();
- return 0;
- }
- /* Reprotect in case it was a compatible device */
- w83697hf_lock();
-
- pr_info("watchdog not found at address 0x%x\n", wdt_io);
- release_region(wdt_io, 2);
- return -EIO;
-}
-
-static int w83697hf_ioports[] = { 0x2e, 0x4e, 0x00 };
-
-static int __init wdt_init(void)
-{
- int ret, i, found = 0;
-
- pr_info("WDT driver for W83697HF/HG initializing\n");
-
- if (wdt_io == 0) {
- /* we will autodetect the W83697HF/HG watchdog */
- for (i = 0; ((!found) && (w83697hf_ioports[i] != 0)); i++) {
- wdt_io = w83697hf_ioports[i];
- if (!w83697hf_check_wdt())
- found++;
- }
- } else {
- if (!w83697hf_check_wdt())
- found++;
- }
-
- if (!found) {
- pr_err("No W83697HF/HG could be found\n");
- ret = -ENODEV;
- goto out;
- }
-
- w83697hf_init();
- if (early_disable) {
- if (wdt_running())
- pr_warn("Stopping previously enabled watchdog until userland kicks in\n");
- wdt_disable();
- }
-
- if (wdt_set_heartbeat(timeout)) {
- wdt_set_heartbeat(WATCHDOG_TIMEOUT);
- pr_info("timeout value must be 1 <= timeout <= 255, using %d\n",
- WATCHDOG_TIMEOUT);
- }
-
- ret = register_reboot_notifier(&wdt_notifier);
- if (ret != 0) {
- pr_err("cannot register reboot notifier (err=%d)\n", ret);
- goto unreg_regions;
- }
-
- ret = misc_register(&wdt_miscdev);
- if (ret != 0) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
- goto unreg_reboot;
- }
-
- pr_info("initialized. timeout=%d sec (nowayout=%d)\n",
- timeout, nowayout);
-
-out:
- return ret;
-unreg_reboot:
- unregister_reboot_notifier(&wdt_notifier);
-unreg_regions:
- release_region(wdt_io, 2);
- goto out;
-}
-
-static void __exit wdt_exit(void)
-{
- misc_deregister(&wdt_miscdev);
- unregister_reboot_notifier(&wdt_notifier);
- release_region(wdt_io, 2);
-}
-
-module_init(wdt_init);
-module_exit(wdt_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
-MODULE_AUTHOR("Samuel Tardieu <sam@rfc1149.net>");
-MODULE_DESCRIPTION("w83697hf/hg WDT driver");
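
The whole w83697hf driver above is removed, its chips being taken over by the consolidated w83627hf_wdt driver; note that the early_disable parameter added to w83627hf_wdt earlier in this diff mirrors the one deleted here. The file used the legacy misc-device interface, including the magic-close protocol (expect_close becomes 42 once a 'V' byte is written). A self-contained userspace sketch of closing such a watchdog cleanly, assuming the usual /dev/watchdog node and nowayout=0:

#include <fcntl.h>
#include <unistd.h>

/* Write the magic 'V' before close(); otherwise the driver logs
 * "Unexpected close, not stopping watchdog!" and keeps it running. */
int stop_watchdog(const char *dev)
{
	int fd = open(dev, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "V", 1) != 1) {
		close(fd);	/* watchdog stays armed */
		return -1;
	}
	return close(fd);	/* driver sees the magic byte and disables the timer */
}
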
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
deleted file mode 100644
index ff58cb74671f..000000000000
--- a/drivers/watchdog/w83697ug_wdt.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * w83697ug/uf WDT driver
- *
- * (c) Copyright 2008 Flemming Frandsen <ff@nrvissing.net>
- * reused original code to support w83697ug/uf.
- *
- * Based on w83627hf_wdt.c which is based on advantechwdt.c
- * which is based on wdt.c.
- * Original copyright messages:
- *
- * (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com>
- * added support for W83627THF.
- *
- * (c) Copyright 2003 Pádraig Brady <P@draigBrady.com>
- *
- * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
- *
- * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
- * http://www.redhat.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
- * warranty for any of this software. This material is provided
- * "AS-IS" and at no charge.
- *
- * (c) Copyright 1995 Alan Cox <alan@redhat.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/ioport.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-
-#define WATCHDOG_NAME "w83697ug/uf WDT"
-#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
-
-static unsigned long wdt_is_open;
-static char expect_close;
-static DEFINE_SPINLOCK(io_lock);
-
-static int wdt_io = 0x2e;
-module_param(wdt_io, int, 0);
-MODULE_PARM_DESC(wdt_io, "w83697ug/uf WDT io port (default 0x2e)");
-
-static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
-module_param(timeout, int, 0);
-MODULE_PARM_DESC(timeout,
- "Watchdog timeout in seconds. 1<= timeout <=255 (default="
- __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-/*
- * Kernel methods.
- */
-
-#define WDT_EFER (wdt_io+0) /* Extended Function Enable Registers */
-#define WDT_EFIR (wdt_io+0) /* Extended Function Index Register
- (same as EFER) */
-#define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */
-
-static int w83697ug_select_wd_register(void)
-{
- unsigned char c;
- unsigned char version;
-
- outb_p(0x87, WDT_EFER); /* Enter extended function mode */
- outb_p(0x87, WDT_EFER); /* Again according to manual */
-
- outb(0x20, WDT_EFER); /* check chip version */
- version = inb(WDT_EFDR);
-
- if (version == 0x68) { /* W83697UG */
- pr_info("Watchdog chip version 0x%02x = W83697UG/UF found at 0x%04x\n",
- version, wdt_io);
-
- outb_p(0x2b, WDT_EFER);
- c = inb_p(WDT_EFDR); /* select WDT0 */
- c &= ~0x04;
- outb_p(0x2b, WDT_EFER);
- outb_p(c, WDT_EFDR); /* set pin118 to WDT0 */
-
- } else {
- pr_err("No W83697UG/UF could be found\n");
- return -ENODEV;
- }
-
- outb_p(0x07, WDT_EFER); /* point to logical device number reg */
- outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
- outb_p(0x30, WDT_EFER); /* select CR30 */
- c = inb_p(WDT_EFDR);
- outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
-
- return 0;
-}
-
-static void w83697ug_unselect_wd_register(void)
-{
- outb_p(0xAA, WDT_EFER); /* Leave extended function mode */
-}
-
-static int w83697ug_init(void)
-{
- int ret;
- unsigned char t;
-
- ret = w83697ug_select_wd_register();
- if (ret != 0)
- return ret;
-
- outb_p(0xF6, WDT_EFER); /* Select CRF6 */
- t = inb_p(WDT_EFDR); /* read CRF6 */
- if (t != 0) {
- pr_info("Watchdog already running. Resetting timeout to %d sec\n",
- timeout);
- outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */
- }
- outb_p(0xF5, WDT_EFER); /* Select CRF5 */
- t = inb_p(WDT_EFDR); /* read CRF5 */
- t &= ~0x0C; /* set second mode &
- disable keyboard turning off watchdog */
- outb_p(t, WDT_EFDR); /* Write back to CRF5 */
-
- w83697ug_unselect_wd_register();
- return 0;
-}
-
-static void wdt_ctrl(int timeout)
-{
- spin_lock(&io_lock);
-
- if (w83697ug_select_wd_register() < 0) {
- spin_unlock(&io_lock);
- return;
- }
-
- outb_p(0xF4, WDT_EFER); /* Select CRF4 */
- outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF4 */
-
- w83697ug_unselect_wd_register();
-
- spin_unlock(&io_lock);
-}
-
-static int wdt_ping(void)
-{
- wdt_ctrl(timeout);
- return 0;
-}
-
-static int wdt_disable(void)
-{
- wdt_ctrl(0);
- return 0;
-}
-
-static int wdt_set_heartbeat(int t)
-{
- if (t < 1 || t > 255)
- return -EINVAL;
-
- timeout = t;
- return 0;
-}
-
-static ssize_t wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if (count) {
- if (!nowayout) {
- size_t i;
-
- expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- wdt_ping();
- }
- return count;
-}
-
-static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_timeout;
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
- WDIOF_SETTIMEOUT |
- WDIOF_MAGICCLOSE,
- .firmware_version = 1,
- .identity = "W83697UG WDT",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user(argp, &ident, sizeof(ident)))
- return -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- {
- int options, retval = -EINVAL;
-
- if (get_user(options, p))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD) {
- wdt_disable();
- retval = 0;
- }
-
- if (options & WDIOS_ENABLECARD) {
- wdt_ping();
- retval = 0;
- }
-
- return retval;
- }
-
- case WDIOC_KEEPALIVE:
- wdt_ping();
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_timeout, p))
- return -EFAULT;
- if (wdt_set_heartbeat(new_timeout))
- return -EINVAL;
- wdt_ping();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- return put_user(timeout, p);
-
- default:
- return -ENOTTY;
- }
- return 0;
-}
-
-static int wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &wdt_is_open))
- return -EBUSY;
- /*
- * Activate
- */
-
- wdt_ping();
- return nonseekable_open(inode, file);
-}
-
-static int wdt_close(struct inode *inode, struct file *file)
-{
- if (expect_close == 42)
- wdt_disable();
- else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- wdt_ping();
- }
- expect_close = 0;
- clear_bit(0, &wdt_is_open);
- return 0;
-}
-
-/*
- * Notifier for system down
- */
-
-static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
- void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- wdt_disable(); /* Turn the WDT off */
-
- return NOTIFY_DONE;
-}
-
-/*
- * Kernel Interfaces
- */
-
-static const struct file_operations wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = wdt_write,
- .unlocked_ioctl = wdt_ioctl,
- .open = wdt_open,
- .release = wdt_close,
-};
-
-static struct miscdevice wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &wdt_fops,
-};
-
-/*
- * The WDT needs to learn about soft shutdowns in order to
- * turn the timebomb registers off.
- */
-
-static struct notifier_block wdt_notifier = {
- .notifier_call = wdt_notify_sys,
-};
-
-static int __init wdt_init(void)
-{
- int ret;
-
- pr_info("WDT driver for the Winbond(TM) W83697UG/UF Super I/O chip initialising\n");
-
- if (wdt_set_heartbeat(timeout)) {
- wdt_set_heartbeat(WATCHDOG_TIMEOUT);
- pr_info("timeout value must be 1<=timeout<=255, using %d\n",
- WATCHDOG_TIMEOUT);
- }
-
- if (!request_region(wdt_io, 1, WATCHDOG_NAME)) {
- pr_err("I/O address 0x%04x already in use\n", wdt_io);
- ret = -EIO;
- goto out;
- }
-
- ret = w83697ug_init();
- if (ret != 0)
- goto unreg_regions;
-
- ret = register_reboot_notifier(&wdt_notifier);
- if (ret != 0) {
- pr_err("cannot register reboot notifier (err=%d)\n", ret);
- goto unreg_regions;
- }
-
- ret = misc_register(&wdt_miscdev);
- if (ret != 0) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
- goto unreg_reboot;
- }
-
- pr_info("initialized. timeout=%d sec (nowayout=%d)\n",
- timeout, nowayout);
-
-out:
- return ret;
-unreg_reboot:
- unregister_reboot_notifier(&wdt_notifier);
-unreg_regions:
- release_region(wdt_io, 1);
- goto out;
-}
-
-static void __exit wdt_exit(void)
-{
- misc_deregister(&wdt_miscdev);
- unregister_reboot_notifier(&wdt_notifier);
- release_region(wdt_io, 1);
-}
-
-module_init(wdt_init);
-module_exit(wdt_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Flemming Frandsen <ff@nrvissing.net>");
-MODULE_DESCRIPTION("w83697ug/uf WDT driver");
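
The w83697ug driver removed above exposed the same legacy ioctl surface. One detail worth noting in its handler: the WDIOC_SETTIMEOUT case deliberately falls through to WDIOC_GETTIMEOUT, so a successful set also writes the effective timeout back to the caller. A userspace sketch of using that behaviour, with fd assumed to be an open watchdog device:

#include <sys/ioctl.h>
#include <linux/watchdog.h>

/* Set a new heartbeat (1..255s for this driver); on success the driver
 * writes the accepted value back through the pointer via the fall-through
 * to GETTIMEOUT, and the call also pings the watchdog. */
int set_watchdog_timeout(int fd, int seconds)
{
	if (ioctl(fd, WDIOC_SETTIMEOUT, &seconds) < 0)
		return -1;
	return seconds;
}
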
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 6d325bda76da..5d4de88fe5b8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1168,7 +1168,8 @@ int gnttab_resume(void)
int gnttab_suspend(void)
{
- gnttab_interface->unmap_frames();
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ gnttab_interface->unmap_frames();
return 0;
}
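
Finally, the grant-table hunk makes gnttab_suspend() a no-op for auto-translated guests: with XENFEAT_auto_translated_physmap set (HVM/PVH-style guests), the grant frames are not mapped through the PV mechanism that unmap_frames() undoes, so only classic PV guests should unmap on suspend. The guard pattern on its own, sketched with the file's internal ops pointer:

#include <xen/features.h>

/* Sketch: PV-only teardown is skipped when the hypervisor auto-translates
 * guest physical addresses (gnttab_interface is this file's ops pointer). */
static void maybe_unmap_frames(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;	/* nothing was PV-mapped; nothing to undo */
	gnttab_interface->unmap_frames();
}
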